slug
stringlengths 15
15
| content
listlengths 1
129
| rawContent
stringlengths 1
2k
| author
dict | attachments
listlengths 0
49
| mentions
listlengths 0
49
| reactions
listlengths 0
12
| publishedAt
stringlengths 24
24
| updatedAt
stringlengths 24
24
| commentators
listlengths 0
52
| url
stringlengths 25
46
| totalUniqueImpressions
int64 1
42.1k
⌀ | numComments
int64 0
621
|
---|---|---|---|---|---|---|---|---|---|---|---|---|
415480285355699 | [
{
"type": "text",
"value": "anychat",
"raw": "anychat",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "supports chatgpt, gemini, perplexity, claude, meta llama, grok all in one app",
"raw": "supports chatgpt, gemini, perplexity, claude, meta llama, grok all in one app",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "try it out there: ",
"raw": "try it out there: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/akhaliq/anychat",
"href": null,
"resource": {
"type": "space",
"id": "akhaliq/anychat",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/akhaliq/anychat",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | anychat
supports chatgpt, gemini, perplexity, claude, meta llama, grok all in one app
try it out there: https://huggingface.co/spaces/akhaliq/anychat
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1674929746905-60f1abe7544c2adfd699860c.jpeg",
"fullname": "AK",
"name": "akhaliq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5205,
"isFollowing": false
} | [] | [] | [] | 2024-11-24T15:39:51.000Z | 2024-11-24T15:39:51.254Z | [] | /posts/akhaliq/415480285355699 | null | 0 |
573286768557034 | [
{
"type": "text",
"value": "Interesting long read from ",
"raw": "Interesting long read from ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@evanmiller-anthropic",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "evanmiller-anthropic",
"label": null,
"lang": null
},
{
"type": "text",
"value": " on having a better founded statistical approach to Language Model Evaluations:",
"raw": " on having a better founded statistical approach to Language Model Evaluations:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.anthropic.com/research/statistical-approach-to-model-evals",
"href": "https://www.anthropic.com/research/statistical-approach-to-model-evals",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Worth a read if you're into LLM evaluations!",
"raw": "Worth a read if you're into LLM evaluations!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Cc ",
"raw": "Cc ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@clefourrier",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "clefourrier",
"label": null,
"lang": null
}
] | Interesting long read from @evanmiller-anthropic on having a better founded statistical approach to Language Model Evaluations:
https://www.anthropic.com/research/statistical-approach-to-model-evals
Worth a read if you're into LLM evaluations!
Cc @clefourrier | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg",
"fullname": "Thomas Wolf",
"name": "thomwolf",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 704,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5df7e9e5da6d0311fd3d53f9/UOyX5evzJg2CVMd8xoqnb.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1644340617257-noauth.png",
"fullname": "Clémentine Fourrier",
"name": "clefourrier",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 459
},
{
"avatarUrl": "/avatars/04591248ad3ace7b5f1122ecddc7efe8.svg",
"fullname": "Evan Miller",
"name": "evanmiller-anthropic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1
}
] | [] | 2024-11-24T15:29:59.000Z | 2024-11-24T15:31:35.045Z | [] | /posts/thomwolf/573286768557034 | null | 0 |
454790754502988 | [
{
"type": "text",
"value": "For those who want to try out the new ",
"raw": "For those who want to try out the new ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev",
"href": null,
"resource": {
"type": "model",
"id": "black-forest-labs/FLUX.1-Redux-dev",
"discussionNum": null
},
"url": "https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can do this from my latest spaces ",
"raw": "You can do this from my latest spaces ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/MohamedRashad/Flux-Redux",
"href": null,
"resource": {
"type": "space",
"id": "MohamedRashad/Flux-Redux",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/MohamedRashad/Flux-Redux",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | For those who want to try out the new https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev
You can do this from my latest spaces https://huggingface.co/spaces/MohamedRashad/Flux-Redux | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1628885133347-6116d0584ef9fdfbf45dc4d9.jpeg",
"fullname": "Mohamed Rashad",
"name": "MohamedRashad",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 141,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T14:03:43.000Z | 2024-11-24T14:03:43.928Z | [] | /posts/MohamedRashad/454790754502988 | 35 | 0 |
195659728745561 | [
{
"type": "text",
"value": "Estamos tratando de unir, aunar fuerzas y cooperar en experimentos de IA en América Latina. Te invito a unirte a nosotros en «LatinAI». La idea es compartir y organizar espacios, modelos y conjuntos de datos en español/portugués/guaraní/mapuche o ingles para el desarrollo en América Latina.",
"raw": "Estamos tratando de unir, aunar fuerzas y cooperar en experimentos de IA en América Latina. Te invito a unirte a nosotros en «LatinAI». La idea es compartir y organizar espacios, modelos y conjuntos de datos en español/portugués/guaraní/mapuche o ingles para el desarrollo en América Latina.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Siéntete libre de unirte a la organización : ",
"raw": "Siéntete libre de unirte a la organización : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/LatinAI",
"href": "https://huggingface.co/LatinAI",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "---",
"raw": "---",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We are trying to unite, join forces and cooperate in AI experiments in Latin America. We invite you to join us in “LatinAI”. The idea is to share and organize spaces, models and datasets in Spanish/Portuguese/Guarani/Mapuche or English for development in Latin America.",
"raw": "We are trying to unite, join forces and cooperate in AI experiments in Latin America. We invite you to join us in “LatinAI”. The idea is to share and organize spaces, models and datasets in Spanish/Portuguese/Guarani/Mapuche or English for development in Latin America.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Feel free to join the organization : ",
"raw": "Feel free to join the organization : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/LatinAI",
"href": "https://huggingface.co/LatinAI",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Estamos tratando de unir, aunar fuerzas y cooperar en experimentos de IA en América Latina. Te invito a unirte a nosotros en «LatinAI». La idea es compartir y organizar espacios, modelos y conjuntos de datos en español/portugués/guaraní/mapuche o ingles para el desarrollo en América Latina.
Siéntete libre de unirte a la organización : https://huggingface.co/LatinAI
---
We are trying to unite, join forces and cooperate in AI experiments in Latin America. We invite you to join us in “LatinAI”. The idea is to share and organize spaces, models and datasets in Spanish/Portuguese/Guarani/Mapuche or English for development in Latin America.
Feel free to join the organization : https://huggingface.co/LatinAI | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65665c2af450504854d60806/_YXDkUoXTrIdPFf94rRh0.jpeg",
"fullname": "Ramon Mayor Martins",
"name": "rmayormartins",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 22,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T13:43:00.000Z | 2024-11-24T13:43:00.808Z | [] | /posts/rmayormartins/195659728745561 | 49 | 0 |
418420597611798 | [
{
"type": "text",
"value": "Sorry, I just cannot get the hype behind F5 TTS. It has now gathered a thousand votes in the TTS Arena fork and **has remained in #8 spot** in the _mostly_ Open TTS adversaries.",
"raw": "Sorry, I just cannot get the hype behind F5 TTS. It has now gathered a thousand votes in the TTS Arena fork and **has remained in #8 spot** in the _mostly_ Open TTS adversaries.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The voice sample used is the same as XTTS. F5 has so far been unstable, being unemotional/monotone/depressed and mispronouncing words (_awestruck_).",
"raw": "The voice sample used is the same as XTTS. F5 has so far been unstable, being unemotional/monotone/depressed and mispronouncing words (_awestruck_).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you have suggestions please give feedback in the following thread:",
"raw": "If you have suggestions please give feedback in the following thread:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/mrfakename/E2-F5-TTS/discussions/32",
"href": null,
"resource": {
"type": "space",
"id": "mrfakename/E2-F5-TTS",
"discussionNum": 32
},
"url": "https://huggingface.co/spaces/mrfakename/E2-F5-TTS/discussions/32",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Sorry, I just cannot get the hype behind F5 TTS. It has now gathered a thousand votes in the TTS Arena fork and **has remained in #8 spot** in the _mostly_ Open TTS adversaries.
The voice sample used is the same as XTTS. F5 has so far been unstable, being unemotional/monotone/depressed and mispronouncing words (_awestruck_).
If you have suggestions please give feedback in the following thread:
https://huggingface.co/spaces/mrfakename/E2-F5-TTS/discussions/32 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d52e0c4e5642795617f668/ztXLrdFz3gkUJUIIQXfHo.png",
"fullname": "Yanis L",
"name": "Pendrokar",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 15,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63d52e0c4e5642795617f668/ZDVB0mKa7SNvAQ1xcGEPH.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T11:25:35.000Z | 2024-11-24T11:25:35.816Z | [] | /posts/Pendrokar/418420597611798 | 94 | 0 |
319625675629271 | [
{
"type": "text",
"value": "Maybe that post I showed the other day with my Hyperbolic Embeddings getting to perfect loss with RAdam was a one-time fluke, bad test dataset, etc.? Anotha' one! I gave it a test set a PhD student would struggle with. This model is a bit more souped up. Major callouts of the model: High Dimensional Encoding (HDC), Hyperbolic Embeddings, Entropix. Link to the Colab Notebook: ",
"raw": "Maybe that post I showed the other day with my Hyperbolic Embeddings getting to perfect loss with RAdam was a one-time fluke, bad test dataset, etc.? Anotha' one! I gave it a test set a PhD student would struggle with. This model is a bit more souped up. Major callouts of the model: High Dimensional Encoding (HDC), Hyperbolic Embeddings, Entropix. Link to the Colab Notebook: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://colab.research.google.com/drive/1mS-uxhufx-h7eZXL0ZwPMAAXHqSeGZxX?usp=sharing",
"href": "https://colab.research.google.com/drive/1mS-uxhufx-h7eZXL0ZwPMAAXHqSeGZxX?usp=sharing",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Maybe that post I showed the other day with my Hyperbolic Embeddings getting to perfect loss with RAdam was a one-time fluke, bad test dataset, etc.? Anotha' one! I gave it a test set a PhD student would struggle with. This model is a bit more souped up. Major callouts of the model: High Dimensional Encoding (HDC), Hyperbolic Embeddings, Entropix. Link to the Colab Notebook: https://colab.research.google.com/drive/1mS-uxhufx-h7eZXL0ZwPMAAXHqSeGZxX?usp=sharing | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/z_vgKFqgTyfz3LxuffCUB.png"
}
] | [] | [] | 2024-11-24T08:53:52.000Z | 2024-11-24T09:35:35.833Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg",
"fullname": "Firstname Lastname",
"name": "takeraparterer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 29,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
}
] | /posts/TuringsSolutions/319625675629271 | 143 | 4 |
102132857217055 | [
{
"type": "text",
"value": "I created bodybuilder and playful AI this week. Try them!",
"raw": "I created bodybuilder and playful AI this week. Try them!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/migueldeguzmandev/bodybuilder_ai",
"href": null,
"resource": {
"type": "model",
"id": "migueldeguzmandev/bodybuilder_ai",
"discussionNum": null
},
"url": "https://huggingface.co/migueldeguzmandev/bodybuilder_ai",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/migueldeguzmandev/playful_ai",
"href": null,
"resource": {
"type": "model",
"id": "migueldeguzmandev/playful_ai",
"discussionNum": null
},
"url": "https://huggingface.co/migueldeguzmandev/playful_ai",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I created bodybuilder and playful AI this week. Try them!
https://huggingface.co/migueldeguzmandev/bodybuilder_ai
https://huggingface.co/migueldeguzmandev/playful_ai | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6436066c76dbfd731bce1a44/6Kh5xTS2vMgmzJ-M-pqw7.png",
"fullname": "Miguelito De Guzman",
"name": "migueldeguzmandev",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T08:45:15.000Z | 2024-11-24T09:24:00.859Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6436066c76dbfd731bce1a44/6Kh5xTS2vMgmzJ-M-pqw7.png",
"fullname": "Miguelito De Guzman",
"name": "migueldeguzmandev",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/migueldeguzmandev/102132857217055 | 119 | 1 |
538076072707429 | [
{
"type": "text",
"value": "Using AI to teach English as a Foreign Language? EFL teachers often have busy schedules, variable class sizes, and unexpected cancellations. Introducting VocabSova: ",
"raw": "Using AI to teach English as a Foreign Language? EFL teachers often have busy schedules, variable class sizes, and unexpected cancellations. Introducting VocabSova: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/ZennyKenny/VocabSova",
"href": null,
"resource": {
"type": "space",
"id": "ZennyKenny/VocabSova",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/ZennyKenny/VocabSova",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "VocabSova is a simple chatbot interface that helps teachers create topical vocabulary lists, custom worksheets using that vocabulary, and group activities on a defined theme for a specific English-speaking level (according to CEFR international standards).",
"raw": "VocabSova is a simple chatbot interface that helps teachers create topical vocabulary lists, custom worksheets using that vocabulary, and group activities on a defined theme for a specific English-speaking level (according to CEFR international standards).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "There is a great use case for AI in nearly every field, and language learning is a particularly apt domain in my opinion. VocabSova is in active development during its Alpha release, all feedback welcome.",
"raw": "There is a great use case for AI in nearly every field, and language learning is a particularly apt domain in my opinion. VocabSova is in active development during its Alpha release, all feedback welcome.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Using AI to teach English as a Foreign Language? EFL teachers often have busy schedules, variable class sizes, and unexpected cancellations. Introducting VocabSova: https://huggingface.co/spaces/ZennyKenny/VocabSova
VocabSova is a simple chatbot interface that helps teachers create topical vocabulary lists, custom worksheets using that vocabulary, and group activities on a defined theme for a specific English-speaking level (according to CEFR international standards).
There is a great use case for AI in nearly every field, and language learning is a particularly apt domain in my opinion. VocabSova is in active development during its Alpha release, all feedback welcome. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656e3808d4de03a07d116850/JZh4lrjFueJZVqugjoloP.jpeg",
"fullname": "Kenneth Hamilton",
"name": "ZennyKenny",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 33,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T04:48:41.000Z | 2024-11-24T04:48:41.173Z | [] | /posts/ZennyKenny/538076072707429 | 209 | 0 |
117573628010199 | [
{
"type": "text",
"value": "Good folks from ",
"raw": "Good folks from ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@amazon",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "amazon",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Stanford",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Stanford",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", and other great institutions have released “A Comprehensive Survey of Hallucination Mitigation Techniques in Large Language Models!”",
"raw": ", and other great institutions have released “A Comprehensive Survey of Hallucination Mitigation Techniques in Large Language Models!”",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This comprehensive survey examines over 32 cutting-edge techniques to combat hallucination in Large Language Models (LLMs). As LLMs become increasingly integral to our daily operations, addressing their tendency to generate ungrounded content is crucial.",
"raw": "This comprehensive survey examines over 32 cutting-edge techniques to combat hallucination in Large Language Models (LLMs). As LLMs become increasingly integral to our daily operations, addressing their tendency to generate ungrounded content is crucial.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Retrieval-Augmented Generation (RAG) Innovations:",
"raw": "Retrieval-Augmented Generation (RAG) Innovations:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Pre-generation retrieval using LLM-Augmenter with Plug-and-Play modules",
"raw": "- Pre-generation retrieval using LLM-Augmenter with Plug-and-Play modules",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Real-time verification through the EVER framework implementing three-stage validation",
"raw": "- Real-time verification through the EVER framework implementing three-stage validation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Post-generation refinement via the RARR system for automated attribution",
"raw": "- Post-generation refinement via the RARR system for automated attribution",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Advanced Decoding Strategies:",
"raw": "Advanced Decoding Strategies:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Context-Aware Decoding (CAD) utilizing contrastive output distribution",
"raw": "- Context-Aware Decoding (CAD) utilizing contrastive output distribution",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- DoLa's innovative approach of contrasting logit differences between transformer layers",
"raw": "- DoLa's innovative approach of contrasting logit differences between transformer layers",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Knowledge Integration Methods:",
"raw": "Knowledge Integration Methods:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- The RHO framework leveraging entity representations and relation predicates",
"raw": "- The RHO framework leveraging entity representations and relation predicates",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- FLEEK's intelligent fact verification system using curated knowledge graphs",
"raw": "- FLEEK's intelligent fact verification system using curated knowledge graphs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Novel Loss Functions:",
"raw": "Novel Loss Functions:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Text Hallucination Regularization (THR) derived from mutual information",
"raw": "- Text Hallucination Regularization (THR) derived from mutual information",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- The mFACT metric for evaluating faithfulness in multilingual contexts",
"raw": "- The mFACT metric for evaluating faithfulness in multilingual contexts",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This research provides a structured taxonomy for categorizing these mitigation techniques, offering valuable insights for practitioners and researchers working with LLMs.",
"raw": "This research provides a structured taxonomy for categorizing these mitigation techniques, offering valuable insights for practitioners and researchers working with LLMs.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "What are your thoughts on hallucination mitigation in LLMs?",
"raw": "What are your thoughts on hallucination mitigation in LLMs?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Good folks from @amazon, @Stanford, and other great institutions have released “A Comprehensive Survey of Hallucination Mitigation Techniques in Large Language Models!”
This comprehensive survey examines over 32 cutting-edge techniques to combat hallucination in Large Language Models (LLMs). As LLMs become increasingly integral to our daily operations, addressing their tendency to generate ungrounded content is crucial.
Retrieval-Augmented Generation (RAG) Innovations:
- Pre-generation retrieval using LLM-Augmenter with Plug-and-Play modules
- Real-time verification through the EVER framework implementing three-stage validation
- Post-generation refinement via the RARR system for automated attribution
Advanced Decoding Strategies:
- Context-Aware Decoding (CAD) utilizing contrastive output distribution
- DoLa's innovative approach of contrasting logit differences between transformer layers
Knowledge Integration Methods:
- The RHO framework leveraging entity representations and relation predicates
- FLEEK's intelligent fact verification system using curated knowledge graphs
Novel Loss Functions:
- Text Hallucination Regularization (THR) derived from mutual information
- The mFACT metric for evaluating faithfulness in multilingual contexts
This research provides a structured taxonomy for categorizing these mitigation techniques, offering valuable insights for practitioners and researchers working with LLMs.
What are your thoughts on hallucination mitigation in LLMs? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 219,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/k5RoT0IOOG9erjBWJVLOj.jpeg"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T02:45:02.000Z | 2024-11-24T14:55:57.726Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/bGo60bmJrLTD0CIERF6GP.png",
"fullname": "I M Weasel",
"name": "imw34531",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
}
] | /posts/singhsidhukuldeep/117573628010199 | 216 | 1 |
378209596329028 | [
{
"type": "text",
"value": "Release a new virtual tryon flux fill finetuning model. Try it here. ",
"raw": "Release a new virtual tryon flux fill finetuning model. Try it here. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/xiaozaa/catvton-flux-alpha",
"href": null,
"resource": {
"type": "model",
"id": "xiaozaa/catvton-flux-alpha",
"discussionNum": null
},
"url": "https://huggingface.co/xiaozaa/catvton-flux-alpha",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Release a new virtual tryon flux fill finetuning model. Try it here.
https://huggingface.co/xiaozaa/catvton-flux-alpha
| {
"avatarUrl": "/avatars/4941f9461c77bb5c5c0b5ec9a6f9efed.svg",
"fullname": "az",
"name": "xiaozaa",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 7,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6401517a8ba76abe4b72b2bf/75K6VNDnzDG9ihG18C2Ux.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6401517a8ba76abe4b72b2bf/mXe4GpJHNwxax0maUHf9j.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6401517a8ba76abe4b72b2bf/eBz7WssbJyWzGURAP_FyI.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6401517a8ba76abe4b72b2bf/yHUR1ZFr25YYFOIhZ7xIm.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6401517a8ba76abe4b72b2bf/O3Vj2Em-dB7ECbIYeQNvv.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"imrankhakwani",
"John6666"
],
"count": 2
}
] | 2024-11-24T02:01:24.000Z | 2024-11-24T02:01:24.673Z | [] | /posts/xiaozaa/378209596329028 | 211 | 0 |
834919494324436 | [
{
"type": "text",
"value": "Repurposed my older AI workstation to a homelab server, it has received 2xV100 + 1xP40",
"raw": "Repurposed my older AI workstation to a homelab server, it has received 2xV100 + 1xP40",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I can reach huge 210k token context size with MegaBeam-Mistral-7B-512k-GGUF ~70+tok/s, or run Llama-3.1-Nemotron-70B-Instruct-HF-GGUF with 50k Context ~10tok/s (V100 only 40k ctx and 15tok/s).",
"raw": "I can reach huge 210k token context size with MegaBeam-Mistral-7B-512k-GGUF ~70+tok/s, or run Llama-3.1-Nemotron-70B-Instruct-HF-GGUF with 50k Context ~10tok/s (V100 only 40k ctx and 15tok/s).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Also able to Lora finetune with similar performace as an RTX3090.",
"raw": "Also able to Lora finetune with similar performace as an RTX3090.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It moved to the garage to no complaints for the noise from the family. Will move to a Rack soon :D",
"raw": "It moved to the garage to no complaints for the noise from the family. Will move to a Rack soon :D",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Repurposed my older AI workstation to a homelab server, it has received 2xV100 + 1xP40
I can reach huge 210k token context size with MegaBeam-Mistral-7B-512k-GGUF ~70+tok/s, or run Llama-3.1-Nemotron-70B-Instruct-HF-GGUF with 50k Context ~10tok/s (V100 only 40k ctx and 15tok/s).
Also able to Lora finetune with similar performace as an RTX3090.
It moved to the garage to no complaints for the noise from the family. Will move to a Rack soon :D
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e6d37e02dee9bcb9d9fa18/os24VYiNCoyth9yQSdv_A.jpeg",
"fullname": "Csaba Kecskemeti",
"name": "csabakecskemeti",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 9,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64e6d37e02dee9bcb9d9fa18/9RauPJdJLt2gDQ081Udxw.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64e6d37e02dee9bcb9d9fa18/34pYYbQd46L04JUC4ZVJ6.jpeg"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-24T00:04:02.000Z | 2024-11-24T04:31:05.379Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/6aIIy7eNLLjepZfp3Aym3.png",
"fullname": "George M",
"name": "ge-or-ge",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e6d37e02dee9bcb9d9fa18/os24VYiNCoyth9yQSdv_A.jpeg",
"fullname": "Csaba Kecskemeti",
"name": "csabakecskemeti",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 9,
"isFollowing": false
}
] | /posts/csabakecskemeti/834919494324436 | 168 | 2 |
308791196500352 | [
{
"type": "text",
"value": "p104-100s are beasts. 8 gigs of VRAM, 12 tok/s on qwen 14b at q4, and 18 tok/s on 7b at q6. best thing - 20 euros each.",
"raw": "p104-100s are beasts. 8 gigs of VRAM, 12 tok/s on qwen 14b at q4, and 18 tok/s on 7b at q6. best thing - 20 euros each.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://furry.engineer/@cappuch/113500349547803802",
"href": "https://furry.engineer/@cappuch/113500349547803802",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | p104-100s are beasts. 8 gigs of VRAM, 12 tok/s on qwen 14b at q4, and 18 tok/s on 7b at q6. best thing - 20 euros each.
https://furry.engineer/@cappuch/113500349547803802 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64da645be42fba08b88d0315/dcBKWq3d3X9QKQbtf8t46.jpeg",
"fullname": "Mikus",
"name": "cappuch",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 9,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤯",
"users": [
"YaTharThShaRma999",
"AtAndDev",
"John6666"
],
"count": 3
},
{
"reaction": "😎",
"users": [
"csabakecskemeti",
"AtAndDev"
],
"count": 2
}
] | 2024-11-23T21:16:14.000Z | 2024-11-23T21:16:14.425Z | [] | /posts/cappuch/308791196500352 | 453 | 0 |
216590365251377 | [
{
"type": "text",
"value": "SAM: I can segment anything!",
"raw": "SAM: I can segment anything!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "SAMURAI: Hold my sake while I track it through time and space without even training 🎯",
"raw": "SAMURAI: Hold my sake while I track it through time and space without even training 🎯",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Researchers really said what if we gave SAM object permanence and it worked 🤯",
"raw": "Researchers really said what if we gave SAM object permanence and it worked 🤯",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.aimodels.fyi/papers/arxiv/samurai-adapting-segment-anything-model-zero-shot",
"href": "https://www.aimodels.fyi/papers/arxiv/samurai-adapting-segment-anything-model-zero-shot",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | SAM: I can segment anything!
SAMURAI: Hold my sake while I track it through time and space without even training 🎯
Researchers really said what if we gave SAM object permanence and it worked 🤯
https://www.aimodels.fyi/papers/arxiv/samurai-adapting-segment-anything-model-zero-shot | {
"avatarUrl": "/avatars/0bc16a7447cd71ac18828a678313bd83.svg",
"fullname": "Mike Young",
"name": "mikelabs",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"mikelabs"
],
"count": 2
},
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-23T17:37:17.000Z | 2024-11-24T00:13:47.361Z | [
{
"avatarUrl": "/avatars/b2b3fe650c1fcc689d74e66f84aa5e5f.svg",
"fullname": "fdgt fgh",
"name": "gfdhujykrdr",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/mikelabs/216590365251377 | 580 | 1 |
782676184452029 | [
{
"type": "text",
"value": "📢 If you were earlier interested in quick translator application for bunch of texts with spans of fixed parts that tolerant for translation, then this post might be relevant! Delighted to share a bulk_translate -- a framework for automatic texts translation with the pre-anotated fixed spans. ",
"raw": "📢 If you were earlier interested in quick translator application for bunch of texts with spans of fixed parts that tolerant for translation, then this post might be relevant! Delighted to share a bulk_translate -- a framework for automatic texts translation with the pre-anotated fixed spans. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📦 ",
"raw": "📦 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://pypi.org/project/bulk-translate/",
"href": "https://pypi.org/project/bulk-translate/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌟 ",
"raw": "🌟 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/nicolay-r/bulk-translate",
"href": "https://github.com/nicolay-r/bulk-translate",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔑 Spans allows you to control your objects in texts, so that objects would be tollerant to translator. By default it provides implementation for GoogleTranslate.",
"raw": "🔑 Spans allows you to control your objects in texts, so that objects would be tollerant to translator. By default it provides implementation for GoogleTranslate.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "bulk_translate features: ",
"raw": "bulk_translate features: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ Native Implementation of two translation modes:",
"raw": "✅ Native Implementation of two translation modes:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " - fast-mode: exploits extra chars for grouping text parts into single batch",
"raw": " - fast-mode: exploits extra chars for grouping text parts into single batch",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " - accurate: pefroms individual translation of each text part.",
"raw": " - accurate: pefroms individual translation of each text part.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ No strings: you're free to adopt any LM / LLM backend.",
"raw": "✅ No strings: you're free to adopt any LM / LLM backend.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Support googletrans by default.",
"raw": "Support googletrans by default.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The initial release of the project supports fixed spans as text parts wrapped in square brackets [] with non inner space characters.",
"raw": "The initial release of the project supports fixed spans as text parts wrapped in square brackets [] with non inner space characters.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can play with your data in CSV here on GoogleColab:",
"raw": "You can play with your data in CSV here on GoogleColab:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📒 ",
"raw": "📒 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://colab.research.google.com/github/nicolay-r/bulk-translate/blob/master/bulk_translate_demo.ipynb",
"href": "https://colab.research.google.com/github/nicolay-r/bulk-translate/blob/master/bulk_translate_demo.ipynb",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👏 This project is based on AREkit 0.25.1 pipelines for deployment lm-based workflows: ",
"raw": "👏 This project is based on AREkit 0.25.1 pipelines for deployment lm-based workflows: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/nicolay-r/AREkit",
"href": "https://github.com/nicolay-r/AREkit",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 📢 If you were earlier interested in quick translator application for bunch of texts with spans of fixed parts that tolerant for translation, then this post might be relevant! Delighted to share a bulk_translate -- a framework for automatic texts translation with the pre-anotated fixed spans.
📦 https://pypi.org/project/bulk-translate/
🌟 https://github.com/nicolay-r/bulk-translate
🔑 Spans allows you to control your objects in texts, so that objects would be tollerant to translator. By default it provides implementation for GoogleTranslate.
bulk_translate features:
✅ Native Implementation of two translation modes:
- fast-mode: exploits extra chars for grouping text parts into single batch
- accurate: pefroms individual translation of each text part.
✅ No strings: you're free to adopt any LM / LLM backend.
Support googletrans by default.
The initial release of the project supports fixed spans as text parts wrapped in square brackets [] with non inner space characters.
You can play with your data in CSV here on GoogleColab:
📒 https://colab.research.google.com/github/nicolay-r/bulk-translate/blob/master/bulk_translate_demo.ipynb
👏 This project is based on AREkit 0.25.1 pipelines for deployment lm-based workflows:
https://github.com/nicolay-r/AREkit | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e62d11d27a8292c3637f86/aptDeBHpCJxcREj6KPLN1.jpeg",
"fullname": "Nicolay Rusnachenko",
"name": "nicolay-r",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 49,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/LcYCqtHaZyyaQaQGGa3ri.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/ZDaa1LmbrZJJOvI8P1_nR.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/n_5T_gXwLWM4raIUkClu6.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64e62d11d27a8292c3637f86/_4SbYk_XtxncAZnwdy044.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-23T12:33:02.000Z | 2024-11-23T12:48:04.858Z | [] | /posts/nicolay-r/782676184452029 | 430 | 0 |
642264011690230 | [
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/hexgrad/Kokoro-TTS",
"href": null,
"resource": {
"type": "space",
"id": "hexgrad/Kokoro-TTS",
"discussionNum": null
},
"url": "https://hf.co/spaces/hexgrad/Kokoro-TTS",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " just got an upgrade that substantially improves TTS naturalness for short bursts while maintaining parity for longer utterances! 🔥",
"raw": " just got an upgrade that substantially improves TTS naturalness for short bursts while maintaining parity for longer utterances! 🔥",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Read more and listen to before/after audio samples at ",
"raw": "Read more and listen to before/after audio samples at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://hf.co/blog/hexgrad/kokoro-short-burst-upgrade",
"href": "https://hf.co/blog/hexgrad/kokoro-short-burst-upgrade",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "(Probably would have made that Article a Post instead, if audio could be embedded into Posts.)",
"raw": "(Probably would have made that Article a Post instead, if audio could be embedded into Posts.)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | https://hf.co/spaces/hexgrad/Kokoro-TTS just got an upgrade that substantially improves TTS naturalness for short bursts while maintaining parity for longer utterances! 🔥
Read more and listen to before/after audio samples at https://hf.co/blog/hexgrad/kokoro-short-burst-upgrade
(Probably would have made that Article a Post instead, if audio could be embedded into Posts.) | {
"avatarUrl": "/avatars/02074f60a2ef445a29343ed90a303cc6.svg",
"fullname": "Hexgrad",
"name": "hexgrad",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 20,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"ai-everyday",
"victor",
"AtAndDev",
"Norod78"
],
"count": 5
},
{
"reaction": "🔥",
"users": [
"prithivMLmods",
"AtAndDev"
],
"count": 2
}
] | 2024-11-23T00:00:15.000Z | 2024-11-23T18:58:44.764Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/66c26b6fb01b19d8c3c2467b/HIcQYcU6rOilwbuRCRStm.jpeg",
"fullname": "DV",
"name": "Delta-Vector",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 19,
"isFollowing": false
}
] | /posts/hexgrad/642264011690230 | 1,115 | 1 |
622669072793033 | [
{
"type": "text",
"value": "🦋 Hug the butterfly! You can now add your Bluesky handle to your Hugging Face profile! ✨",
"raw": "🦋 Hug the butterfly! You can now add your Bluesky handle to your Hugging Face profile! ✨",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🦋 Hug the butterfly! You can now add your Bluesky handle to your Hugging Face profile! ✨ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/jG8vFk8ANl3LzPPYgSnYn.jpeg"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"jsulz",
"arthurspapa",
"OmbelineM",
"enzostvs",
"John6666",
"gordy12gg",
"funi58480",
"Clausss"
],
"count": 8
},
{
"reaction": "🤗",
"users": [
"John6666",
"monsoon-nlp"
],
"count": 2
}
] | 2024-11-22T23:14:41.000Z | 2024-11-22T23:14:41.790Z | [] | /posts/fdaudens/622669072793033 | 1,225 | 0 |
153064070369037 | [
{
"type": "text",
"value": "NVIDIA Labs developed SANA model weights and Gradio demo app published —Check out this amazing new Text to Image model by NVIDIA",
"raw": "NVIDIA Labs developed SANA model weights and Gradio demo app published —Check out this amazing new Text to Image model by NVIDIA",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Official repo : ",
"raw": "Official repo : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/NVlabs/Sana",
"href": "https://github.com/NVlabs/Sana",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1-Click Windows, RunPod, Massed Compute installers and free Kaggle notebook : ",
"raw": "1-Click Windows, RunPod, Massed Compute installers and free Kaggle notebook : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/116474081",
"href": "https://www.patreon.com/posts/116474081",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can follow instructions on the repository to install and use locally. I tested on my Windows RTX 3060 and 3090 GPUs.",
"raw": "You can follow instructions on the repository to install and use locally. I tested on my Windows RTX 3060 and 3090 GPUs.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I have tested some speeds and VRAM usage too",
"raw": "I have tested some speeds and VRAM usage too",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Uses 9.5 GB VRAM but someone reported works good on 8 GB GPUs too",
"raw": "Uses 9.5 GB VRAM but someone reported works good on 8 GB GPUs too",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Default settings per image speeds as below",
"raw": "Default settings per image speeds as below",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Free Kaggle Account Notebook on T4 GPU : 15 second",
"raw": "Free Kaggle Account Notebook on T4 GPU : 15 second",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "RTX 3060 (12 GB) : 9.5 second",
"raw": "RTX 3060 (12 GB) : 9.5 second",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "RTX 3090 : 4 second",
"raw": "RTX 3090 : 4 second",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "RTX 4090 : 2 second",
"raw": "RTX 4090 : 2 second",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "More info : ",
"raw": "More info : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://nvlabs.github.io/Sana/",
"href": "https://nvlabs.github.io/Sana/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Works great on RunPod and Massed Compute as well (cloud)",
"raw": "Works great on RunPod and Massed Compute as well (cloud)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Sana : Efficient High-Resolution Image Synthesis",
"raw": "Sana : Efficient High-Resolution Image Synthesis",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "with Linear Diffusion Transformer",
"raw": "with Linear Diffusion Transformer",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "About Sana — Taken from official repo",
"raw": "About Sana — Taken from official repo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We introduce Sana, a text-to-image framework that can efficiently generate images up to 4096 × 4096 resolution. Sana can synthesize high-resolution, high-quality images with strong text-image alignment at a remarkably fast speed, deployable on laptop GPU. Core designs include: Deep compression autoencoder: unlike traditional AEs, which compress images only 8×, we trained an AE that can compress images 32×, effectively reducing the number of latent tokens. Linear DiT: we replace all vanilla attention in DiT with linear attention, which is more efficient at high resolutions without sacrificing quality. Decoder-only text encoder: we replaced T5 with modern decoder-only small LLM as the text encoder and designed complex human instruction with in-context learning to enhance the image-text alignment. Efficient training and sampling: we propose Flow-DPM-Solver to reduce sampling steps, with efficient caption labeling and selection to accelerate convergence.",
"raw": "We introduce Sana, a text-to-image framework that can efficiently generate images up to 4096 × 4096 resolution. Sana can synthesize high-resolution, high-quality images with strong text-image alignment at a remarkably fast speed, deployable on laptop GPU. Core designs include: Deep compression autoencoder: unlike traditional AEs, which compress images only 8×, we trained an AE that can compress images 32×, effectively reducing the number of latent tokens. Linear DiT: we replace all vanilla attention in DiT with linear attention, which is more efficient at high resolutions without sacrificing quality. Decoder-only text encoder: we replaced T5 with modern decoder-only small LLM as the text encoder and designed complex human instruction with in-context learning to enhance the image-text alignment. Efficient training and sampling: we propose Flow-DPM-Solver to reduce sampling steps, with efficient caption labeling and selection to accelerate convergence.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | NVIDIA Labs developed SANA model weights and Gradio demo app published —Check out this amazing new Text to Image model by NVIDIA
Official repo : https://github.com/NVlabs/Sana
1-Click Windows, RunPod, Massed Compute installers and free Kaggle notebook : https://www.patreon.com/posts/116474081
You can follow instructions on the repository to install and use locally. I tested on my Windows RTX 3060 and 3090 GPUs.
I have tested some speeds and VRAM usage too
Uses 9.5 GB VRAM but someone reported works good on 8 GB GPUs too
Default settings per image speeds as below
Free Kaggle Account Notebook on T4 GPU : 15 second
RTX 3060 (12 GB) : 9.5 second
RTX 3090 : 4 second
RTX 4090 : 2 second
More info : https://nvlabs.github.io/Sana/
Works great on RunPod and Massed Compute as well (cloud)
Sana : Efficient High-Resolution Image Synthesis
with Linear Diffusion Transformer
About Sana — Taken from official repo
We introduce Sana, a text-to-image framework that can efficiently generate images up to 4096 × 4096 resolution. Sana can synthesize high-resolution, high-quality images with strong text-image alignment at a remarkably fast speed, deployable on laptop GPU. Core designs include: Deep compression autoencoder: unlike traditional AEs, which compress images only 8×, we trained an AE that can compress images 32×, effectively reducing the number of latent tokens. Linear DiT: we replace all vanilla attention in DiT with linear attention, which is more efficient at high resolutions without sacrificing quality. Decoder-only text encoder: we replaced T5 with modern decoder-only small LLM as the text encoder and designed complex human instruction with in-context learning to enhance the image-text alignment. Efficient training and sampling: we propose Flow-DPM-Solver to reduce sampling steps, with efficient caption labeling and selection to accelerate convergence.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 376,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/KqOdD2PyaWMen3kxHEst1.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/hG4uxvhJ8TEYXtsP26mCs.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Bbz9kqFRqGjZZyJeddsb7.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/ixI6n8ENWGV-9MSYx72Bs.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/p5tFos_O2wZiK2bHetzT_.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/VBvxObWKdBQfLen0rbsaG.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/c3RNric-IiRyE9gejNhBF.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/k7VQ4_0Sdcxc_OGzGIhFk.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/zRAGD2qVfCsakhTm4BsnA.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Kza50gOKwIoI5fhQTCySH.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/KZrbrog_x7cwk1OG54cPX.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Qj7m4Al2G8MsgR1Fony_O.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/h9ccVFDJMo8zIjOZ1lyXW.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/fC3N8ERv_Rgn3OzcHV35N.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/SIZb4w3xStNrFSRLxGpcj.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/S5EYUHhP5jlZfuO005gIe.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/lgpMVt9NzhzS5GuWzbGy9.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/4OODIr6fREfCoLk-8CFZE.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/gagsaM94j7JPRbctrHVeA.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Z2Cq0mUFzVjz-xwiN39MT.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
},
{
"reaction": "🚀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-22T22:47:00.000Z | 2024-11-22T22:47:00.331Z | [] | /posts/MonsterMMORPG/153064070369037 | 440 | 0 |
558505558371853 | [
{
"type": "text",
"value": "All wisper or transcriptions projects spaces (including Zero GPUs plans) are very slow or have many quotas bugs or processing errors. I tested all... anything can transcribe a single 3 min short audio file!!! How it´s possible???",
"raw": "All wisper or transcriptions projects spaces (including Zero GPUs plans) are very slow or have many quotas bugs or processing errors. I tested all... anything can transcribe a single 3 min short audio file!!! How it´s possible???",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | All wisper or transcriptions projects spaces (including Zero GPUs plans) are very slow or have many quotas bugs or processing errors. I tested all... anything can transcribe a single 3 min short audio file!!! How it´s possible??? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/663e7c386304d377fca8552c/__GKPNKAhSi6ZcEk8XoBD.jpeg",
"fullname": "Edney Silva",
"name": "ednsinf",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-22T22:41:20.000Z | 2024-11-23T16:42:20.668Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6640bbd0220cfa8cbfdce080/wiAHUu5ewawyipNs0YFBR.png",
"fullname": "John Smith",
"name": "John6666",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 398,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/663e7c386304d377fca8552c/__GKPNKAhSi6ZcEk8XoBD.jpeg",
"fullname": "Edney Silva",
"name": "ednsinf",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c82779fdf94f80cdb5020504f83c818b.svg",
"fullname": "Yatharth Sharma",
"name": "YaTharThShaRma999",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 14,
"isFollowing": false
}
] | /posts/ednsinf/558505558371853 | 276 | 3 |
673896178494185 | [
{
"type": "text",
"value": "I'm currently on a push to expand the scope of image based datasets on the Hub. There's certainly a lot already, but for anyone who's looked closely, there's not a whole lot of standardization. I am to fix that, datasets under the ",
"raw": "I'm currently on a push to expand the scope of image based datasets on the Hub. There's certainly a lot already, but for anyone who's looked closely, there's not a whole lot of standardization. I am to fix that, datasets under the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/timm",
"href": "https://huggingface.co/timm",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/pixparse",
"href": "https://huggingface.co/pixparse",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " orgs will serve as canonical examples for various task / modality combinations and be useable without fuss in libraries like ",
"raw": " orgs will serve as canonical examples for various task / modality combinations and be useable without fuss in libraries like ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`timm`",
"href": null,
"resource": null,
"url": null,
"code": "timm",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`OpenCLIP`",
"href": null,
"resource": null,
"url": null,
"code": "OpenCLIP",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ", and hopefully more.",
"raw": ", and hopefully more.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I just uploaded the first multi-label dataset that I'll support with ",
"raw": "I just uploaded the first multi-label dataset that I'll support with ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`timm`",
"href": null,
"resource": null,
"url": null,
"code": "timm",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " scripts soon: ",
"raw": " scripts soon: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/timm/plant-pathology-2021",
"href": null,
"resource": {
"type": "dataset",
"id": "timm/plant-pathology-2021",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/timm/plant-pathology-2021",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Next up object detection & segmentation! I've got an annotation spec sorted out, a lot of datasets ready to rip, and yeah that means ",
"raw": "Next up object detection & segmentation! I've got an annotation spec sorted out, a lot of datasets ready to rip, and yeah that means ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "inline_code",
"value": null,
"raw": "`timm`",
"href": null,
"resource": null,
"url": null,
"code": "timm",
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " support for object detection, eventually segmentation, is finally under development :O",
"raw": " support for object detection, eventually segmentation, is finally under development :O",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I'm currently on a push to expand the scope of image based datasets on the Hub. There's certainly a lot already, but for anyone who's looked closely, there's not a whole lot of standardization. I am to fix that, datasets under the https://huggingface.co/timm and https://huggingface.co/pixparse orgs will serve as canonical examples for various task / modality combinations and be useable without fuss in libraries like `timm`, `OpenCLIP`, and hopefully more.
I just uploaded the first multi-label dataset that I'll support with `timm` scripts soon: https://huggingface.co/datasets/timm/plant-pathology-2021
Next up object detection & segmentation! I've got an annotation spec sorted out, a lot of datasets ready to rip, and yeah that means `timm` support for object detection, eventually segmentation, is finally under development :O
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1667002643224-604a5184dca2c7ac7508b849.jpeg",
"fullname": "Ross Wightman",
"name": "rwightman",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 221,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"davanstrien",
"lhoestq",
"fgdrfgrgrdgdr"
],
"count": 4
}
] | 2024-11-22T21:19:23.000Z | 2024-11-22T21:19:23.859Z | [] | /posts/rwightman/673896178494185 | 720 | 0 |
421660573639613 | [
{
"type": "text",
"value": "Trace LLM calls with Arize AI's Phoenix observability dashboards on Hugging Face Spaces! 🚀",
"raw": "Trace LLM calls with Arize AI's Phoenix observability dashboards on Hugging Face Spaces! 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✨ I just added a new recipe to the Open-Source AI Cookbook that shows you how to:",
"raw": "✨ I just added a new recipe to the Open-Source AI Cookbook that shows you how to:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1️⃣ Deploy Phoenix on HF Spaces with persistent storage in a few clicks",
"raw": "1️⃣ Deploy Phoenix on HF Spaces with persistent storage in a few clicks",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2️⃣ Configure LLM tracing with the 𝗦𝗲𝗿𝘃𝗲𝗿𝗹𝗲𝘀𝘀 𝗜𝗻𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗔𝗣𝗜",
"raw": "2️⃣ Configure LLM tracing with the 𝗦𝗲𝗿𝘃𝗲𝗿𝗹𝗲𝘀𝘀 𝗜𝗻𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗔𝗣𝗜",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3️⃣ Observe multi-agent application runs with the CrewAI integration",
"raw": "3️⃣ Observe multi-agent application runs with the CrewAI integration",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "𝗢𝗯𝘀𝗲𝗿𝘃𝗮𝗯𝗶𝗹𝗶𝘁𝘆 𝗶𝘀 𝗰𝗿𝘂𝗰𝗶𝗮𝗹 for building robust LLM apps.",
"raw": "𝗢𝗯𝘀𝗲𝗿𝘃𝗮𝗯𝗶𝗹𝗶𝘁𝘆 𝗶𝘀 𝗰𝗿𝘂𝗰𝗶𝗮𝗹 for building robust LLM apps.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Phoenix makes it easy to visualize trace data, evaluate performance, and track down issues. Give it a try!",
"raw": "Phoenix makes it easy to visualize trace data, evaluate performance, and track down issues. Give it a try!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Cookbook recipe: ",
"raw": "🔗 Cookbook recipe: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/learn/cookbook/en/phoenix_observability_on_hf_spaces",
"href": "https://huggingface.co/learn/cookbook/en/phoenix_observability_on_hf_spaces",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Phoenix docs: ",
"raw": "🔗 Phoenix docs: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://docs.arize.com/phoenix",
"href": "https://docs.arize.com/phoenix",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Trace LLM calls with Arize AI's Phoenix observability dashboards on Hugging Face Spaces! 🚀
✨ I just added a new recipe to the Open-Source AI Cookbook that shows you how to:
1️⃣ Deploy Phoenix on HF Spaces with persistent storage in a few clicks
2️⃣ Configure LLM tracing with the 𝗦𝗲𝗿𝘃𝗲𝗿𝗹𝗲𝘀𝘀 𝗜𝗻𝗳𝗲𝗿𝗲𝗻𝗰𝗲 𝗔𝗣𝗜
3️⃣ Observe multi-agent application runs with the CrewAI integration
𝗢𝗯𝘀𝗲𝗿𝘃𝗮𝗯𝗶𝗹𝗶𝘁𝘆 𝗶𝘀 𝗰𝗿𝘂𝗰𝗶𝗮𝗹 for building robust LLM apps.
Phoenix makes it easy to visualize trace data, evaluate performance, and track down issues. Give it a try!
🔗 Cookbook recipe: https://huggingface.co/learn/cookbook/en/phoenix_observability_on_hf_spaces
🔗 Phoenix docs: https://docs.arize.com/phoenix | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61d375fd733d3a83ecd1bba9/oIXwvvs1-HaCnJXMCZgkc.jpeg",
"fullname": "Andrew Reed",
"name": "andrewrreed",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 106,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"funi58480"
],
"count": 2
}
] | 2024-11-22T20:52:04.000Z | 2024-11-22T20:52:04.474Z | [] | /posts/andrewrreed/421660573639613 | 631 | 0 |
161512292032853 | [
{
"type": "text",
"value": "If I am correct and the LLM model changes the 'shape' of the data as it learns, then I should be able to track and utilize those shape changes as a backpropagation training mechanism, right? Well guess what, I can do that! Entropy, Sparsity, and Density, this is how I can measure the shape of the data the LLM model is creating. Nodes, Clusters, and Edges, these are the mechanisms within the neural network the LLM model updates as it learns these concepts. I measure the effects of these updates, via Entropy, Sparsity, and Density. Check out more in this video: ",
"raw": "If I am correct and the LLM model changes the 'shape' of the data as it learns, then I should be able to track and utilize those shape changes as a backpropagation training mechanism, right? Well guess what, I can do that! Entropy, Sparsity, and Density, this is how I can measure the shape of the data the LLM model is creating. Nodes, Clusters, and Edges, these are the mechanisms within the neural network the LLM model updates as it learns these concepts. I measure the effects of these updates, via Entropy, Sparsity, and Density. Check out more in this video: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/jADTt5HHtiw",
"href": "https://youtu.be/jADTt5HHtiw",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | If I am correct and the LLM model changes the 'shape' of the data as it learns, then I should be able to track and utilize those shape changes as a backpropagation training mechanism, right? Well guess what, I can do that! Entropy, Sparsity, and Density, this is how I can measure the shape of the data the LLM model is creating. Nodes, Clusters, and Edges, these are the mechanisms within the neural network the LLM model updates as it learns these concepts. I measure the effects of these updates, via Entropy, Sparsity, and Density. Check out more in this video: https://youtu.be/jADTt5HHtiw | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"lunarflu"
],
"count": 1
}
] | 2024-11-17T22:57:57.000Z | 2024-11-18T05:35:31.228Z | [
{
"avatarUrl": "/avatars/709d76cb6f02ae98e13bf8ced95f624d.svg",
"fullname": "Rebelo",
"name": "JonasDiasRebelo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/TuringsSolutions/161512292032853 | 724 | 2 |
716422382816033 | [
{
"type": "text",
"value": "What a brilliant week for Open Source AI!",
"raw": "What a brilliant week for Open Source AI!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Qwen 2.5 Coder by Alibaba - 0.5B / 1.5B / 3B / 7B / 14B/ 32B (Base + Instruct) Code generation LLMs, with 32B tackling giants like Gemnini 1.5 Pro, Claude Sonnet",
"raw": "Qwen 2.5 Coder by Alibaba - 0.5B / 1.5B / 3B / 7B / 14B/ 32B (Base + Instruct) Code generation LLMs, with 32B tackling giants like Gemnini 1.5 Pro, Claude Sonnet",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/Qwen/qwen25-coder-66eaa22e6f99801bf65b0c2f",
"href": null,
"resource": {
"type": "collection",
"id": "Qwen/qwen25-coder-66eaa22e6f99801bf65b0c2f",
"discussionNum": null
},
"url": "https://huggingface.co/collections/Qwen/qwen25-coder-66eaa22e6f99801bf65b0c2f",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "LLM2CLIP from Microsoft - Leverage LLMs to train ultra-powerful CLIP models! Boosts performance over the previous SOTA by ~17%",
"raw": "LLM2CLIP from Microsoft - Leverage LLMs to train ultra-powerful CLIP models! Boosts performance over the previous SOTA by ~17%",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/microsoft/llm2clip-672323a266173cfa40b32d4c",
"href": null,
"resource": {
"type": "collection",
"id": "microsoft/llm2clip-672323a266173cfa40b32d4c",
"discussionNum": null
},
"url": "https://huggingface.co/collections/microsoft/llm2clip-672323a266173cfa40b32d4c",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Athene v2 Chat & Agent by NexusFlow - SoTA general LLM fine-tuned from Qwen 2.5 72B excels at Chat + Function Calling/ JSON/ Agents",
"raw": "Athene v2 Chat & Agent by NexusFlow - SoTA general LLM fine-tuned from Qwen 2.5 72B excels at Chat + Function Calling/ JSON/ Agents",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/Nexusflow/athene-v2-6735b85e505981a794fb02cc",
"href": null,
"resource": {
"type": "collection",
"id": "Nexusflow/athene-v2-6735b85e505981a794fb02cc",
"discussionNum": null
},
"url": "https://huggingface.co/collections/Nexusflow/athene-v2-6735b85e505981a794fb02cc",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Orca Agent Instruct by Microsoft - 1 million instruct pairs covering text editing, creative writing, coding, reading comprehension, etc - permissively licensed",
"raw": "Orca Agent Instruct by Microsoft - 1 million instruct pairs covering text editing, creative writing, coding, reading comprehension, etc - permissively licensed",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/microsoft/orca-agentinstruct-1M-v1",
"href": null,
"resource": {
"type": "dataset",
"id": "microsoft/orca-agentinstruct-1M-v1",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/microsoft/orca-agentinstruct-1M-v1",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Ultravox by FixieAI - 70B/ 8B model approaching GPT4o level, pick any LLM, train an adapter with Whisper as Audio Encoder",
"raw": "Ultravox by FixieAI - 70B/ 8B model approaching GPT4o level, pick any LLM, train an adapter with Whisper as Audio Encoder",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/reach-vb/ultravox-audio-language-model-release-67373b602af0a52b2a88ae71",
"href": null,
"resource": {
"type": "collection",
"id": "reach-vb/ultravox-audio-language-model-release-67373b602af0a52b2a88ae71",
"discussionNum": null
},
"url": "https://huggingface.co/collections/reach-vb/ultravox-audio-language-model-release-67373b602af0a52b2a88ae71",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "JanusFlow 1.3 by DeepSeek - Next iteration of their Unified MultiModal LLM Janus with RectifiedFlow",
"raw": "JanusFlow 1.3 by DeepSeek - Next iteration of their Unified MultiModal LLM Janus with RectifiedFlow",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/deepseek-ai/JanusFlow-1.3B",
"href": null,
"resource": {
"type": "model",
"id": "deepseek-ai/JanusFlow-1.3B",
"discussionNum": null
},
"url": "https://huggingface.co/deepseek-ai/JanusFlow-1.3B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Common Corpus by Pleais - 2,003,039,184,047 multilingual, commercially permissive and high quality tokens! ",
"raw": "Common Corpus by Pleais - 2,003,039,184,047 multilingual, commercially permissive and high quality tokens! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/PleIAs/common_corpus",
"href": null,
"resource": {
"type": "dataset",
"id": "PleIAs/common_corpus",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/PleIAs/common_corpus",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I'm sure I missed a lot, can't wait for the next week!",
"raw": "I'm sure I missed a lot, can't wait for the next week!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Put down in comments what I missed! 🤗",
"raw": "Put down in comments what I missed! 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | What a brilliant week for Open Source AI!
Qwen 2.5 Coder by Alibaba - 0.5B / 1.5B / 3B / 7B / 14B/ 32B (Base + Instruct) Code generation LLMs, with 32B tackling giants like Gemnini 1.5 Pro, Claude Sonnet
https://huggingface.co/collections/Qwen/qwen25-coder-66eaa22e6f99801bf65b0c2f
LLM2CLIP from Microsoft - Leverage LLMs to train ultra-powerful CLIP models! Boosts performance over the previous SOTA by ~17%
https://huggingface.co/collections/microsoft/llm2clip-672323a266173cfa40b32d4c
Athene v2 Chat & Agent by NexusFlow - SoTA general LLM fine-tuned from Qwen 2.5 72B excels at Chat + Function Calling/ JSON/ Agents
https://huggingface.co/collections/Nexusflow/athene-v2-6735b85e505981a794fb02cc
Orca Agent Instruct by Microsoft - 1 million instruct pairs covering text editing, creative writing, coding, reading comprehension, etc - permissively licensed
https://huggingface.co/datasets/microsoft/orca-agentinstruct-1M-v1
Ultravox by FixieAI - 70B/ 8B model approaching GPT4o level, pick any LLM, train an adapter with Whisper as Audio Encoder
https://huggingface.co/collections/reach-vb/ultravox-audio-language-model-release-67373b602af0a52b2a88ae71
JanusFlow 1.3 by DeepSeek - Next iteration of their Unified MultiModal LLM Janus with RectifiedFlow
https://huggingface.co/deepseek-ai/JanusFlow-1.3B
Common Corpus by Pleais - 2,003,039,184,047 multilingual, commercially permissive and high quality tokens!
https://huggingface.co/datasets/PleIAs/common_corpus
I'm sure I missed a lot, can't wait for the next week!
Put down in comments what I missed! 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg",
"fullname": "Vaibhav Srivastav",
"name": "reach-vb",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 460,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"AdinaY",
"cosmosgenius",
"s3nh",
"davidberenstein1957",
"John6666",
"Ameeeee",
"lunarflu",
"jsulz",
"MingxingLi",
"m-ric",
"fullstuckdev",
"mdpi-ai",
"erinys",
"IBOYAI"
],
"count": 15
},
{
"reaction": "👍",
"users": [
"victor",
"Vazdru",
"davidberenstein1957",
"nicolollo",
"lunarflu",
"fullstuckdev",
"Maitt"
],
"count": 7
},
{
"reaction": "🤗",
"users": [
"prithivMLmods",
"lunarflu",
"jsulz",
"fullstuckdev"
],
"count": 4
},
{
"reaction": "🚀",
"users": [
"John6666",
"lunarflu",
"fullstuckdev",
"erinys"
],
"count": 4
}
] | 2024-11-17T21:01:34.000Z | 2024-11-17T21:01:34.035Z | [] | /posts/reach-vb/716422382816033 | 3,996 | 0 |
449943263175424 | [
{
"type": "text",
"value": "next version of ",
"raw": "next version of ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/sequelbox/Celestia",
"href": null,
"resource": {
"type": "dataset",
"id": "sequelbox/Celestia",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/sequelbox/Celestia",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " will be ",
"raw": " will be ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/microsoft/orca-agentinstruct-1M-v1",
"href": null,
"resource": {
"type": "dataset",
"id": "microsoft/orca-agentinstruct-1M-v1",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/microsoft/orca-agentinstruct-1M-v1",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " style. coming soon",
"raw": " style. coming soon",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | next version of https://huggingface.co/datasets/sequelbox/Celestia will be https://huggingface.co/datasets/microsoft/orca-agentinstruct-1M-v1 style. coming soon | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png",
"fullname": "t.d.a.g.",
"name": "sequelbox",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 51,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"lunarflu",
"thomwolf"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"zoeywin"
],
"count": 1
}
] | 2024-11-17T18:26:14.000Z | 2024-11-19T13:16:35.127Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg",
"fullname": "Thomas Wolf",
"name": "thomwolf",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 704,
"isFollowing": false
}
] | /posts/sequelbox/449943263175424 | 1,133 | 1 |
194933978747638 | [
{
"type": "text",
"value": "Minimalistic Adapters 🎃",
"raw": "Minimalistic Adapters 🎃",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀Demo Here:",
"raw": "🚀Demo Here:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC",
"href": null,
"resource": {
"type": "space",
"id": "prithivMLmods/FLUX-LoRA-DLC",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀Model:",
"raw": "🚀Model:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ Quote Tuner } : ",
"raw": "{ Quote Tuner } : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Quote-LoRA",
"href": null,
"resource": {
"type": "model",
"id": "prithivMLmods/Flux.1-Dev-Quote-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Quote-LoRA",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ Stamp Art } : ",
"raw": "{ Stamp Art } : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Stamp-Art-LoRA",
"href": null,
"resource": {
"type": "model",
"id": "prithivMLmods/Flux.1-Dev-Stamp-Art-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Stamp-Art-LoRA",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ Hand Sticky } : ",
"raw": "{ Hand Sticky } : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Hand-Sticky-LoRA",
"href": null,
"resource": {
"type": "model",
"id": "prithivMLmods/Flux.1-Dev-Hand-Sticky-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Hand-Sticky-LoRA",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ Poster HQ } : ",
"raw": "{ Poster HQ } : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Poster-HQ-LoRA",
"href": null,
"resource": {
"type": "model",
"id": "prithivMLmods/Flux.1-Dev-Poster-HQ-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Poster-HQ-LoRA",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ Ctoon Min } : ",
"raw": "{ Ctoon Min } : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Ctoon-LoRA",
"href": null,
"resource": {
"type": "model",
"id": "prithivMLmods/Flux.1-Dev-Ctoon-LoRA",
"discussionNum": null
},
"url": "https://huggingface.co/prithivMLmods/Flux.1-Dev-Ctoon-LoRA",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀Collection:",
"raw": "🚀Collection:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ Flux LoRA Collection} : ",
"raw": "{ Flux LoRA Collection} : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be",
"href": null,
"resource": {
"type": "collection",
"id": "prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be",
"discussionNum": null
},
"url": "https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "{ LoRA Space Collection } : ",
"raw": "{ LoRA Space Collection } : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32",
"href": null,
"resource": {
"type": "collection",
"id": "prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32",
"discussionNum": null
},
"url": "https://huggingface.co/collections/prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀For More Visit",
"raw": "🚀For More Visit",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/strangerzonehf",
"href": "https://huggingface.co/strangerzonehf",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤗@prithivMLmods ",
"raw": "🤗@prithivMLmods ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Minimalistic Adapters 🎃
🚀Demo Here:
https://huggingface.co/spaces/prithivMLmods/FLUX-LoRA-DLC
🚀Model:
{ Quote Tuner } : https://huggingface.co/prithivMLmods/Flux.1-Dev-Quote-LoRA
{ Stamp Art } : https://huggingface.co/prithivMLmods/Flux.1-Dev-Stamp-Art-LoRA
{ Hand Sticky } : https://huggingface.co/prithivMLmods/Flux.1-Dev-Hand-Sticky-LoRA
{ Poster HQ } : https://huggingface.co/prithivMLmods/Flux.1-Dev-Poster-HQ-LoRA
{ Ctoon Min } : https://huggingface.co/prithivMLmods/Flux.1-Dev-Ctoon-LoRA
🚀Collection:
{ Flux LoRA Collection} : https://huggingface.co/collections/prithivMLmods/flux-lora-collections-66dd5908be2206cfaa8519be
{ LoRA Space Collection } : https://huggingface.co/collections/prithivMLmods/lora-space-collections-6714b72e0d49e1c97fbd6a32
🚀For More Visit
https://huggingface.co/strangerzonehf
.
.
.
🤗@prithivMLmods
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 393,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/12Hjd_RsUd59yyHJOTDQj.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/LTpJ-onsbsFsVK6iJC_ys.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/-UgwQiG_3Y5B8D-k85cK_.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Op2WNMPcugdMNWbztS5CN.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/3302JOoBc5WDYP_nKpDaN.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/EYAsUaQql55ZXljMfIbLX.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/gm8LkgtcQDvw7wgnO5tfq.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/pwt53LD9f-qW1jE0HWCIB.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/F8LJ03rEWMp5mthrS6CTM.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/C-SxwKF0vHIA-NIB5ZOKf.png"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/KQwN20D2aGlqRTxmcW6cI.mp4"
}
] | [] | [
{
"reaction": "🤗",
"users": [
"Ngrthm",
"RenderIo",
"darksfx",
"ai4life44",
"hypergod",
"rdrede",
"victor",
"John6666",
"merve",
"clem",
"OmbelineM"
],
"count": 11
},
{
"reaction": "❤️",
"users": [
"RenderIo",
"Csplk",
"hypergod",
"diabolic6045",
"clem",
"Ngrthm",
"ayush7",
"kimleang123"
],
"count": 8
},
{
"reaction": "🔥",
"users": [
"hypergod",
"ai4life44",
"prefetching",
"clem"
],
"count": 4
},
{
"reaction": "🚀",
"users": [
"darksfx",
"clem",
"Ngrthm"
],
"count": 3
}
] | 2024-11-17T17:14:32.000Z | 2024-11-17T22:45:55.762Z | [
{
"avatarUrl": "/avatars/b2725bb163fa15d6c5856121780d52eb.svg",
"fullname": "Ci Splunk",
"name": "Csplk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 43,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 393,
"isFollowing": false
}
] | /posts/prithivMLmods/194933978747638 | 3,866 | 3 |
982072243005650 | [
{
"type": "text",
"value": "Kohya brought massive improvements to FLUX LoRA (as low as 4 GB GPUs) and DreamBooth / Fine-Tuning (as low as 6 GB GPUs) training - check attached images in full size to see full details",
"raw": "Kohya brought massive improvements to FLUX LoRA (as low as 4 GB GPUs) and DreamBooth / Fine-Tuning (as low as 6 GB GPUs) training - check attached images in full size to see full details",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can download all configs and full instructions",
"raw": "You can download all configs and full instructions",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> ",
"raw": "> ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/112099700",
"href": "https://www.patreon.com/posts/112099700",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " - Fine Tuning post",
"raw": " - Fine Tuning post",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> ",
"raw": "> ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/110879657",
"href": "https://www.patreon.com/posts/110879657",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " - LoRA post",
"raw": " - LoRA post",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Kohya brought massive improvements to FLUX LoRA and DreamBooth / Fine-Tuning (min 6GB GPU) training.",
"raw": "Kohya brought massive improvements to FLUX LoRA and DreamBooth / Fine-Tuning (min 6GB GPU) training.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Now as low as 4GB GPUs can train FLUX LoRA with decent quality and 24GB and below GPUs got a huge speed boost when doing Full DreamBooth / Fine-Tuning training",
"raw": "Now as low as 4GB GPUs can train FLUX LoRA with decent quality and 24GB and below GPUs got a huge speed boost when doing Full DreamBooth / Fine-Tuning training",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You need minimum 4GB GPU to do a FLUX LoRA training and minimum 6 GB GPU to do FLUX DreamBooth / Full Fine-Tuning training. It is just mind blowing.",
"raw": "You need minimum 4GB GPU to do a FLUX LoRA training and minimum 6 GB GPU to do FLUX DreamBooth / Full Fine-Tuning training. It is just mind blowing.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can download all configs and full instructions > ",
"raw": "You can download all configs and full instructions > ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.patreon.com/posts/112099700",
"href": "https://www.patreon.com/posts/112099700",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The above post also has 1-click installers and downloaders for Windows, RunPod and Massed Compute",
"raw": "The above post also has 1-click installers and downloaders for Windows, RunPod and Massed Compute",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The model downloader scripts also updated and downloading 30+GB models takes total 1 minute on Massed Compute",
"raw": "The model downloader scripts also updated and downloading 30+GB models takes total 1 minute on Massed Compute",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can read the recent updates here : ",
"raw": "You can read the recent updates here : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/kohya-ss/sd-scripts/tree/sd3?tab=readme-ov-file#recent-updates",
"href": "https://github.com/kohya-ss/sd-scripts/tree/sd3?tab=readme-ov-file#recent-updates",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This is the Kohya GUI branch : ",
"raw": "This is the Kohya GUI branch : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/bmaltais/kohya_ss/tree/sd3-flux.1",
"href": "https://github.com/bmaltais/kohya_ss/tree/sd3-flux.1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key thing to reduce VRAM usage is using block swap",
"raw": "Key thing to reduce VRAM usage is using block swap",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Kohya implemented the logic of OneTrainer to improve block swapping speed significantly and now it is supported for LoRAs as well",
"raw": "Kohya implemented the logic of OneTrainer to improve block swapping speed significantly and now it is supported for LoRAs as well",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Now you can do FP16 training with LoRAs on 24 GB and below GPUs",
"raw": "Now you can do FP16 training with LoRAs on 24 GB and below GPUs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Now you can train a FLUX LoRA on a 4 GB GPU - key is FP8, block swap and using certain layers training (remember single layer LoRA training)",
"raw": "Now you can train a FLUX LoRA on a 4 GB GPU - key is FP8, block swap and using certain layers training (remember single layer LoRA training)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It took me more than 1 day to test all newer configs, their VRAM demands, their relative step speeds and prepare the configs :)",
"raw": "It took me more than 1 day to test all newer configs, their VRAM demands, their relative step speeds and prepare the configs :)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Kohya brought massive improvements to FLUX LoRA (as low as 4 GB GPUs) and DreamBooth / Fine-Tuning (as low as 6 GB GPUs) training - check attached images in full size to see full details
You can download all configs and full instructions
> https://www.patreon.com/posts/112099700 - Fine Tuning post
> https://www.patreon.com/posts/110879657 - LoRA post
Kohya brought massive improvements to FLUX LoRA and DreamBooth / Fine-Tuning (min 6GB GPU) training.
Now as low as 4GB GPUs can train FLUX LoRA with decent quality and 24GB and below GPUs got a huge speed boost when doing Full DreamBooth / Fine-Tuning training
You need minimum 4GB GPU to do a FLUX LoRA training and minimum 6 GB GPU to do FLUX DreamBooth / Full Fine-Tuning training. It is just mind blowing.
You can download all configs and full instructions > https://www.patreon.com/posts/112099700
The above post also has 1-click installers and downloaders for Windows, RunPod and Massed Compute
The model downloader scripts also updated and downloading 30+GB models takes total 1 minute on Massed Compute
You can read the recent updates here : https://github.com/kohya-ss/sd-scripts/tree/sd3?tab=readme-ov-file#recent-updates
This is the Kohya GUI branch : https://github.com/bmaltais/kohya_ss/tree/sd3-flux.1
Key thing to reduce VRAM usage is using block swap
Kohya implemented the logic of OneTrainer to improve block swapping speed significantly and now it is supported for LoRAs as well
Now you can do FP16 training with LoRAs on 24 GB and below GPUs
Now you can train a FLUX LoRA on a 4 GB GPU - key is FP8, block swap and using certain layers training (remember single layer LoRA training)
It took me more than 1 day to test all newer configs, their VRAM demands, their relative step speeds and prepare the configs :) | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 376,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/OLpWbbp__ZGrxkDvAku7a.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/Hn8LnZDOI6GVbz1NXho9Z.jpeg"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"MonsterMMORPG",
"John6666",
"Aduagba1",
"carlizor"
],
"count": 4
},
{
"reaction": "❤️",
"users": [
"MonsterMMORPG",
"remjie",
"jayavibhav",
"DennyDenn"
],
"count": 4
},
{
"reaction": "🔥",
"users": [
"MonsterMMORPG",
"carlizor",
"ilikeprivacy"
],
"count": 3
},
{
"reaction": "🤗",
"users": [
"MonsterMMORPG",
"prithivMLmods"
],
"count": 2
},
{
"reaction": "👀",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "😎",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "➕",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "👍",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🤝",
"users": [
"MonsterMMORPG"
],
"count": 1
},
{
"reaction": "🤯",
"users": [
"MonsterMMORPG"
],
"count": 1
}
] | 2024-11-17T14:49:39.000Z | 2024-11-17T14:49:39.775Z | [] | /posts/MonsterMMORPG/982072243005650 | 2,368 | 0 |
953449438611686 | [
{
"type": "text",
"value": "Ok RNNs can rap too:)",
"raw": "Ok RNNs can rap too:)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Here we implement the seminal RNN paper “Generating Text with Recurrent Neural Networks\"- we train a character-level multiplicative recurrent neural network model (~250k params) for 1000 epochs with Adam opt on 2pac's \"Hit 'em Up\", sample was fun lol.",
"raw": "Here we implement the seminal RNN paper “Generating Text with Recurrent Neural Networks\"- we train a character-level multiplicative recurrent neural network model (~250k params) for 1000 epochs with Adam opt on 2pac's \"Hit 'em Up\", sample was fun lol.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/Jaykef/ai-algorithms/blob/main/generating_texts_with_rnns.ipynb",
"href": "https://github.com/Jaykef/ai-algorithms/blob/main/generating_texts_with_rnns.ipynb",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Ok RNNs can rap too:)
Here we implement the seminal RNN paper “Generating Text with Recurrent Neural Networks"- we train a character-level multiplicative recurrent neural network model (~250k params) for 1000 epochs with Adam opt on 2pac's "Hit 'em Up", sample was fun lol.
Code: https://github.com/Jaykef/ai-algorithms/blob/main/generating_texts_with_rnns.ipynb | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/VLgYTC3kfxsoMHmKkD8Fo.mp4"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/4-z07k3Yar-e7-AKi_7Dh.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/v-KvBI3106FyIuVZMn_hr.png"
}
] | [] | [
{
"reaction": "👀",
"users": [
"John6666",
"YaTharThShaRma999",
"gabr7elferreira"
],
"count": 3
},
{
"reaction": "🔥",
"users": [
"doguscank"
],
"count": 1
}
] | 2024-11-17T13:02:24.000Z | 2024-11-17T13:02:24.594Z | [] | /posts/Jaward/953449438611686 | 1,592 | 0 |
611948696998118 | [
{
"type": "text",
"value": "Finaly I realesed mediapipe-face animation space.",
"raw": "Finaly I realesed mediapipe-face animation space.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mediapipe 68-points Eyes-Closed and Mouth-Opened",
"raw": "Mediapipe 68-points Eyes-Closed and Mouth-Opened",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Akjava/mediapipe-68-facial-guide-eyes-closed-mouth-opened",
"href": null,
"resource": {
"type": "space",
"id": "Akjava/mediapipe-68-facial-guide-eyes-closed-mouth-opened",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Akjava/mediapipe-68-facial-guide-eyes-closed-mouth-opened",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "[Article]Results: Converted Guide Images(eyes-closed and mouth-opened) with Flux.1 schenll img2img/inpaint",
"raw": "[Article]Results: Converted Guide Images(eyes-closed and mouth-opened) with Flux.1 schenll img2img/inpaint",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/Akjava/result-guide-image-eyes-mouth",
"href": "https://huggingface.co/blog/Akjava/result-guide-image-eyes-mouth",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "All the other tools listed are designed to support Mediapipe Face Animation",
"raw": "All the other tools listed are designed to support Mediapipe Face Animation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/collections/Akjava/mediapipe-tools-672ffe8ee7b62763c31b70c7",
"href": "https://huggingface.co/collections/Akjava/mediapipe-tools-672ffe8ee7b62763c31b70c7",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/collections/Akjava/webp-3-frame-talking-animation-tools-672819ce4989f354cdbcc739",
"href": "https://huggingface.co/collections/Akjava/webp-3-frame-talking-animation-tools-672819ce4989f354cdbcc739",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Finaly I realesed mediapipe-face animation space.
Mediapipe 68-points Eyes-Closed and Mouth-Opened
https://huggingface.co/spaces/Akjava/mediapipe-68-facial-guide-eyes-closed-mouth-opened
[Article]Results: Converted Guide Images(eyes-closed and mouth-opened) with Flux.1 schenll img2img/inpaint
https://huggingface.co/blog/Akjava/result-guide-image-eyes-mouth
All the other tools listed are designed to support Mediapipe Face Animation
https://huggingface.co/collections/Akjava/mediapipe-tools-672ffe8ee7b62763c31b70c7
https://huggingface.co/collections/Akjava/webp-3-frame-talking-animation-tools-672819ce4989f354cdbcc739 | {
"avatarUrl": "/avatars/fb866e3758189d70488fc6a879151f45.svg",
"fullname": "Akihito Miyazaki",
"name": "Akjava",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666"
],
"count": 1
}
] | 2024-11-17T12:43:13.000Z | 2024-11-17T12:43:13.043Z | [] | /posts/Akjava/611948696998118 | 410 | 0 |
478756824597278 | [
{
"type": "text",
"value": "Good folks at ",
"raw": "Good folks at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@nvidia",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "nvidia",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and @Tsinghua_Uni have released LLAMA-MESH - A Revolutionary Approach to 3D Content Generation!",
"raw": " and @Tsinghua_Uni have released LLAMA-MESH - A Revolutionary Approach to 3D Content Generation!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This innovative framework enables the direct generation of 3D meshes from natural language prompts while maintaining strong language capabilities.",
"raw": "This innovative framework enables the direct generation of 3D meshes from natural language prompts while maintaining strong language capabilities.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Here is the Architecture & Implementation!",
"raw": "Here is the Architecture & Implementation!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ">> Core Components",
"raw": ">> Core Components",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model Foundation ",
"raw": "Model Foundation ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- If you haven't guessed it yet, it's built on the LLaMA-3.1-8B-Instruct base model ",
"raw": "- If you haven't guessed it yet, it's built on the LLaMA-3.1-8B-Instruct base model ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Maintains original language capabilities while adding 3D generation ",
"raw": "- Maintains original language capabilities while adding 3D generation ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Context length is set to 8,000 tokens ",
"raw": "- Context length is set to 8,000 tokens ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3D Representation Strategy ",
"raw": "3D Representation Strategy ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Uses the OBJ file format for mesh representation ",
"raw": "- Uses the OBJ file format for mesh representation ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Quantizes vertex coordinates into 64 discrete bins per axis ",
"raw": "- Quantizes vertex coordinates into 64 discrete bins per axis ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Sorts vertices by z-y-x coordinates, from lowest to highest ",
"raw": "- Sorts vertices by z-y-x coordinates, from lowest to highest ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Sorts faces by the lowest vertex indices for consistency ",
"raw": "- Sorts faces by the lowest vertex indices for consistency ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Data Processing Pipeline ",
"raw": "Data Processing Pipeline ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Filters meshes to a maximum of 500 faces for computational efficiency ",
"raw": "- Filters meshes to a maximum of 500 faces for computational efficiency ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Applies random rotations (0°, 90°, 180°, 270°) for data augmentation ",
"raw": "- Applies random rotations (0°, 90°, 180°, 270°) for data augmentation ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Generates ~125k mesh variations from 31k base meshes ",
"raw": "- Generates ~125k mesh variations from 31k base meshes ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Uses Cap3D-generated captions for text descriptions ",
"raw": "- Uses Cap3D-generated captions for text descriptions ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ">> Training Framework",
"raw": ">> Training Framework",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Dataset Composition ",
"raw": "Dataset Composition ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 40% Mesh Generation tasks ",
"raw": "- 40% Mesh Generation tasks ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 20% Mesh Understanding tasks ",
"raw": "- 20% Mesh Understanding tasks ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 40% General Conversation (UltraChat dataset) ",
"raw": "- 40% General Conversation (UltraChat dataset) ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 8x training turns for generation, 4x for understanding ",
"raw": "- 8x training turns for generation, 4x for understanding ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Training Configuration ",
"raw": "Training Configuration ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Deployed on 32 A100 GPUs (for Nvidia, this is literally in-house) ",
"raw": "- Deployed on 32 A100 GPUs (for Nvidia, this is literally in-house) ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 21,000 training iterations ",
"raw": "- 21,000 training iterations ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Global batch size: 128 ",
"raw": "- Global batch size: 128 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- AdamW optimizer with a 1e-5 learning rate ",
"raw": "- AdamW optimizer with a 1e-5 learning rate ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 30-step warmup with cosine scheduling ",
"raw": "- 30-step warmup with cosine scheduling ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Total training time: approximately 3 days (based on the paper) ",
"raw": "- Total training time: approximately 3 days (based on the paper) ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This research opens exciting possibilities for intuitive 3D content creation through natural language interaction. The future of digital design is conversational!",
"raw": "This research opens exciting possibilities for intuitive 3D content creation through natural language interaction. The future of digital design is conversational!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Good folks at @nvidia and @Tsinghua_Uni have released LLAMA-MESH - A Revolutionary Approach to 3D Content Generation!
This innovative framework enables the direct generation of 3D meshes from natural language prompts while maintaining strong language capabilities.
Here is the Architecture & Implementation!
>> Core Components
Model Foundation
- If you haven't guessed it yet, it's built on the LLaMA-3.1-8B-Instruct base model
- Maintains original language capabilities while adding 3D generation
- Context length is set to 8,000 tokens
3D Representation Strategy
- Uses the OBJ file format for mesh representation
- Quantizes vertex coordinates into 64 discrete bins per axis
- Sorts vertices by z-y-x coordinates, from lowest to highest
- Sorts faces by the lowest vertex indices for consistency
Data Processing Pipeline
- Filters meshes to a maximum of 500 faces for computational efficiency
- Applies random rotations (0°, 90°, 180°, 270°) for data augmentation
- Generates ~125k mesh variations from 31k base meshes
- Uses Cap3D-generated captions for text descriptions
>> Training Framework
Dataset Composition
- 40% Mesh Generation tasks
- 20% Mesh Understanding tasks
- 40% General Conversation (UltraChat dataset)
- 8x training turns for generation, 4x for understanding
Training Configuration
- Deployed on 32 A100 GPUs (for Nvidia, this is literally in-house)
- 21,000 training iterations
- Global batch size: 128
- AdamW optimizer with a 1e-5 learning rate
- 30-step warmup with cosine scheduling
- Total training time: approximately 3 days (based on the paper)
This research opens exciting possibilities for intuitive 3D content creation through natural language interaction. The future of digital design is conversational! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 219,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/7UzRyFrbCXT2wC_QDLKLx.mp4"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"Mrdesigner14",
"John6666",
"roland0822",
"DatGG",
"KadirErturk",
"EdilCamil"
],
"count": 6
},
{
"reaction": "🚀",
"users": [
"John6666",
"casper911"
],
"count": 2
},
{
"reaction": "👍",
"users": [
"csabakecskemeti",
"gauravpatil"
],
"count": 2
}
] | 2024-11-17T07:57:31.000Z | 2024-11-17T07:57:31.455Z | [] | /posts/singhsidhukuldeep/478756824597278 | 2,281 | 0 |
578160125260008 | [
{
"type": "text",
"value": "OmniVision-968M: a new local VLM for edge devices, fast & small but performant",
"raw": "OmniVision-968M: a new local VLM for edge devices, fast & small but performant",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💨 a new vision language model with 9x less image tokens, super efficient ",
"raw": "💨 a new vision language model with 9x less image tokens, super efficient ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📖 aligned with DPO for reducing hallucinations",
"raw": "📖 aligned with DPO for reducing hallucinations",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⚡️ Apache 2.0 license 🔥",
"raw": "⚡️ Apache 2.0 license 🔥",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo hf.co/spaces/NexaAIDev/omnivlm-dpo-demo",
"raw": "Demo hf.co/spaces/NexaAIDev/omnivlm-dpo-demo",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model ",
"raw": "Model ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/NexaAIDev/omnivision-968M",
"href": null,
"resource": {
"type": "model",
"id": "NexaAIDev/omnivision-968M",
"discussionNum": null
},
"url": "https://huggingface.co/NexaAIDev/omnivision-968M",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | OmniVision-968M: a new local VLM for edge devices, fast & small but performant
💨 a new vision language model with 9x less image tokens, super efficient
📖 aligned with DPO for reducing hallucinations
⚡️ Apache 2.0 license 🔥
Demo hf.co/spaces/NexaAIDev/omnivlm-dpo-demo
Model https://huggingface.co/NexaAIDev/omnivision-968M | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/UpftcDUFh7eDXfvTbRROY.jpeg"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"John6666",
"quyet7779",
"typesdigital",
"Csplk",
"Norod78",
"not-lain",
"Sri-Vigneshwar-DJ",
"ai-everyday",
"victor",
"lhoestq",
"Nydaym",
"Catering3733",
"ogozcelik",
"ucsahin",
"appvoid",
"FGOTYT",
"OmbelineM"
],
"count": 18
},
{
"reaction": "👀",
"users": [
"Csplk",
"maxiw",
"not-lain",
"ucsahin"
],
"count": 4
},
{
"reaction": "🤗",
"users": [
"prithivMLmods",
"ucsahin"
],
"count": 2
}
] | 2024-11-16T23:26:19.000Z | 2024-11-18T16:19:47.318Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6048ea0c0f59ab4b614f1836/8Eg8IyPtJgOHmywcJ7E8a.jpeg",
"fullname": "RITABRATA MAITI",
"name": "ritabratamaiti",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
}
] | /posts/merve/578160125260008 | 4,724 | 4 |
269038377723431 | [
{
"type": "text",
"value": "Kokoro: a small, fast 80M param TTS model hosted on ZeroGPU at ",
"raw": "Kokoro: a small, fast 80M param TTS model hosted on ZeroGPU at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://hf.co/spaces/hexgrad/Kokoro-TTS",
"href": null,
"resource": {
"type": "space",
"id": "hexgrad/Kokoro-TTS",
"discussionNum": null
},
"url": "https://hf.co/spaces/hexgrad/Kokoro-TTS",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Kokoro: a small, fast 80M param TTS model hosted on ZeroGPU at https://hf.co/spaces/hexgrad/Kokoro-TTS | {
"avatarUrl": "/avatars/02074f60a2ef445a29343ed90a303cc6.svg",
"fullname": "Hexgrad",
"name": "hexgrad",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 20,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"John6666",
"Pendrokar",
"Sri-Vigneshwar-DJ",
"ai-everyday",
"bendangelo",
"ecyht2",
"merve",
"deki",
"victor",
"s3nh",
"Gatozu35",
"fireblade2534"
],
"count": 13
}
] | 2024-11-16T22:37:07.000Z | 2024-11-18T07:27:42.811Z | [
{
"avatarUrl": "/avatars/a1d86d990de3b90ed8fdb29c60337219.svg",
"fullname": "Be",
"name": "bendangelo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/52a153d04d325469e1be69bce610ebe5.svg",
"fullname": "ecyht2",
"name": "ecyht2",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
}
] | /posts/hexgrad/269038377723431 | 3,146 | 3 |
143199321478313 | [
{
"type": "text",
"value": "🚀 Introducing the Model Drops Tracker! 🕵️♂️",
"raw": "🚀 Introducing the Model Drops Tracker! 🕵️♂️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Feeling overwhelmed by the AI model release frenzy? 🤯 You're not alone!",
"raw": "Feeling overwhelmed by the AI model release frenzy? 🤯 You're not alone!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I built this simple tool to help us all keep up:",
"raw": "I built this simple tool to help us all keep up:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Filter recent models from the 🤗 Hub",
"raw": "- Filter recent models from the 🤗 Hub",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Set minimum likes threshold",
"raw": "- Set minimum likes threshold",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Choose how recent you want to go",
"raw": "- Choose how recent you want to go",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Try it out and let me know what you think: ",
"raw": "Try it out and let me know what you think: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/fdaudens/Model-Drops-Tracker",
"href": null,
"resource": {
"type": "space",
"id": "fdaudens/Model-Drops-Tracker",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/fdaudens/Model-Drops-Tracker",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Any features you'd like to see added?",
"raw": "Any features you'd like to see added?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "#AIModels",
"raw": "#AIModels",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀 Introducing the Model Drops Tracker! 🕵️♂️
Feeling overwhelmed by the AI model release frenzy? 🤯 You're not alone!
I built this simple tool to help us all keep up:
- Filter recent models from the 🤗 Hub
- Set minimum likes threshold
- Choose how recent you want to go
Try it out and let me know what you think: https://huggingface.co/spaces/fdaudens/Model-Drops-Tracker
Any features you'd like to see added?
#AIModels | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/3UslU7TB7CHobIwSadfUa.mp4"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"davidberenstein1957",
"ajibawa-2023",
"jdzw2014",
"lucianosb",
"ecyht2",
"jgitsolutions",
"John6666",
"LewSypher",
"Nymbo"
],
"count": 9
},
{
"reaction": "👍",
"users": [
"adorkin"
],
"count": 1
}
] | 2024-07-24T19:36:33.000Z | 2024-07-26T18:33:31.204Z | [
{
"avatarUrl": "/avatars/52a153d04d325469e1be69bce610ebe5.svg",
"fullname": "ecyht2",
"name": "ecyht2",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/648a8d2d4ea19a8097e1c0d7/PKDiLe0WCwzNVWgvLvqr7.jpeg",
"fullname": "Henry Holloway",
"name": "henryholloway",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/fdaudens/143199321478313 | 2,286 | 3 |
336620283743824 | [
{
"type": "text",
"value": "🤖💡Just tried out ",
"raw": "🤖💡Just tried out ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@m-ric",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "m-ric",
"label": null,
"lang": null
},
{
"type": "text",
"value": " 's new LLaMA-3.1 70B agent for data analysis. Impressive stuff. ",
"raw": " 's new LLaMA-3.1 70B agent for data analysis. Impressive stuff. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚢📊 Fed it the Titanic passenger dataset with minimal instructions. The agent autonomously dug in, tested hypotheses, and reached some intriguing conclusions:",
"raw": "🚢📊 Fed it the Titanic passenger dataset with minimal instructions. The agent autonomously dug in, tested hypotheses, and reached some intriguing conclusions:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\"Lower class passengers less likely to survive, slight negative correlation with age, and positive correlation between fare price and survival.\" ",
"raw": "\"Lower class passengers less likely to survive, slight negative correlation with age, and positive correlation between fare price and survival.\" ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📈It even generated charts to visualize the findings! ",
"raw": "📈It even generated charts to visualize the findings! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🧠💼 Great potential for business intelligence, research, and decision-making when we can just upload datasets and let AI agents loose on them. ",
"raw": "🧠💼 Great potential for business intelligence, research, and decision-making when we can just upload datasets and let AI agents loose on them. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 Check it out: ",
"raw": "👉 Check it out: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/m-ric/agent-data-analyst",
"href": null,
"resource": {
"type": "space",
"id": "m-ric/agent-data-analyst",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/m-ric/agent-data-analyst",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤔 Any particular use cases you're excited about?",
"raw": "🤔 Any particular use cases you're excited about?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "#AIinDataAnalysis #MachineLearning #DataScience",
"raw": "#AIinDataAnalysis #MachineLearning #DataScience",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🤖💡Just tried out @m-ric 's new LLaMA-3.1 70B agent for data analysis. Impressive stuff.
🚢📊 Fed it the Titanic passenger dataset with minimal instructions. The agent autonomously dug in, tested hypotheses, and reached some intriguing conclusions:
"Lower class passengers less likely to survive, slight negative correlation with age, and positive correlation between fare price and survival."
📈It even generated charts to visualize the findings!
🧠💼 Great potential for business intelligence, research, and decision-making when we can just upload datasets and let AI agents loose on them.
👉 Check it out: https://huggingface.co/spaces/m-ric/agent-data-analyst
🤔 Any particular use cases you're excited about?
#AIinDataAnalysis #MachineLearning #DataScience | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/iDJVQOcxoCJXAMFVwitoq.qt"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 494
}
] | [
{
"reaction": "🔥",
"users": [
"osanseviero",
"m-ric"
],
"count": 2
}
] | 2024-07-24T15:12:35.000Z | 2024-07-25T17:17:10.453Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63c83abc46421a2efe8160d0/Yy6IvAusEgxQ0qhCvB0Ka.jpeg",
"fullname": "Mac Szankin",
"name": "macsz",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/fdaudens/336620283743824 | 814 | 3 |
317039567154641 | [
{
"type": "text",
"value": "Hello there, ",
"raw": "Hello there, ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "New model released, my goal was to try finetune on the last Llama-3.1-8B-Instruct but not a small train, I wanted to do something useful.",
"raw": "New model released, my goal was to try finetune on the last Llama-3.1-8B-Instruct but not a small train, I wanted to do something useful.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "One of the rare model that I didn't made for RP, or in the goal to uncensor it (but I did anyway kek).",
"raw": "One of the rare model that I didn't made for RP, or in the goal to uncensor it (but I did anyway kek).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The model was trained on 9M Claude conversations ONLY, giving him another writting style.",
"raw": "The model was trained on 9M Claude conversations ONLY, giving him another writting style.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude",
"href": null,
"resource": {
"type": "model",
"id": "Undi95/Meta-Llama-3.1-8B-Claude",
"discussionNum": null
},
"url": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " > OG release fp32, it's the epoch 2",
"raw": " > OG release fp32, it's the epoch 2",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-bf16",
"href": null,
"resource": {
"type": "model",
"id": "Undi95/Meta-Llama-3.1-8B-Claude-bf16",
"discussionNum": null
},
"url": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-bf16",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " > Base model resharded in bf16 waiting for available quant without issues",
"raw": " > Base model resharded in bf16 waiting for available quant without issues",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Since it's frustrating to be censored using a local model, orthogonal activation steering was used, trying to force the model to never refuse a prompt.",
"raw": "Since it's frustrating to be censored using a local model, orthogonal activation steering was used, trying to force the model to never refuse a prompt.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-68fail-3000total",
"href": null,
"resource": {
"type": "model",
"id": "Undi95/Meta-Llama-3.1-8B-Claude-68fail-3000total",
"discussionNum": null
},
"url": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-68fail-3000total",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " > Uncensored model, refuse 68 times on 3000 toxic prompt",
"raw": " > Uncensored model, refuse 68 times on 3000 toxic prompt",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-39fail-3000total",
"href": null,
"resource": {
"type": "model",
"id": "Undi95/Meta-Llama-3.1-8B-Claude-39fail-3000total",
"discussionNum": null
},
"url": "https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-39fail-3000total",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " > Uncensored model, refuse 39 times on 3000 toxic prompt",
"raw": " > Uncensored model, refuse 39 times on 3000 toxic prompt",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It still refuse some prompt but the majority of them is uncensored. OAS can make a model more dumb or make the base perplexity go higher, so I didn't snipe for 0 refusal.",
"raw": "It still refuse some prompt but the majority of them is uncensored. OAS can make a model more dumb or make the base perplexity go higher, so I didn't snipe for 0 refusal.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I don't do non-RP model a lot so any feedback is welcome, I would like to re-use this base for some others future project if needed.",
"raw": "I don't do non-RP model a lot so any feedback is welcome, I would like to re-use this base for some others future project if needed.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hello there,
New model released, my goal was to try finetune on the last Llama-3.1-8B-Instruct but not a small train, I wanted to do something useful.
One of the rare model that I didn't made for RP, or in the goal to uncensor it (but I did anyway kek).
The model was trained on 9M Claude conversations ONLY, giving him another writting style.
https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude > OG release fp32, it's the epoch 2
https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-bf16 > Base model resharded in bf16 waiting for available quant without issues
Since it's frustrating to be censored using a local model, orthogonal activation steering was used, trying to force the model to never refuse a prompt.
https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-68fail-3000total > Uncensored model, refuse 68 times on 3000 toxic prompt
https://huggingface.co/Undi95/Meta-Llama-3.1-8B-Claude-39fail-3000total > Uncensored model, refuse 39 times on 3000 toxic prompt
It still refuse some prompt but the majority of them is uncensored. OAS can make a model more dumb or make the base perplexity go higher, so I didn't snipe for 0 refusal.
I don't do non-RP model a lot so any feedback is welcome, I would like to re-use this base for some others future project if needed. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63ab1241ad514ca8d1430003/d-43TcOxG-zqAbzrH2m7H.png",
"fullname": "Undi",
"name": "Undi95",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 3311,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"Chief-Inspector",
"MarinaraSpaghetti",
"Herman555",
"John6666",
"AtonMountlook",
"Ramikan-BR",
"DuckyBlender",
"osanseviero",
"mambiux",
"den0620",
"win10"
],
"count": 11
}
] | 2024-07-24T15:08:08.000Z | 2024-07-24T22:22:31.327Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/666627d86cd2ef174a6e2257/QVRd7WN6kVCtT5BDpf8vq.png",
"fullname": "Invisietch",
"name": "invisietch",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 54,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63ab1241ad514ca8d1430003/d-43TcOxG-zqAbzrH2m7H.png",
"fullname": "Undi",
"name": "Undi95",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 3311,
"isFollowing": false
},
{
"avatarUrl": "/avatars/18daa2d580f5f35cf850bc9df8a03755.svg",
"fullname": "Sporkness",
"name": "SporkySporkness",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5,
"isFollowing": false
}
] | /posts/Undi95/317039567154641 | 10,458 | 4 |
746206932023722 | [
{
"type": "text",
"value": "Professional Threads Post Writer",
"raw": "Professional Threads Post Writer",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://hf.co/chat/assistant/66a0ee6fc397ecb70cee100d",
"href": "https://hf.co/chat/assistant/66a0ee6fc397ecb70cee100d",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Professional Threads Post Writer
https://hf.co/chat/assistant/66a0ee6fc397ecb70cee100d | {
"avatarUrl": "/avatars/d773a7dd9b706759131fc482ab71ced7.svg",
"fullname": "[email protected]",
"name": "Taf2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64841af2295256340e4b9f88/KjqQvbwV8O9Jtz_-L49tS.webp"
}
] | [] | [] | 2024-07-24T12:11:18.000Z | 2024-07-24T12:11:18.282Z | [] | /posts/Taf2023/746206932023722 | 491 | 0 |
999630929802342 | [
{
"type": "text",
"value": "Llama 3.1 405B Instruct beats GPT-4o on MixEval-Hard",
"raw": "Llama 3.1 405B Instruct beats GPT-4o on MixEval-Hard",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Just ran MixEval for 405B, Sonnet-3.5 and 4o, with 405B landing right between the other two at 66.19",
"raw": "Just ran MixEval for 405B, Sonnet-3.5 and 4o, with 405B landing right between the other two at 66.19",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The GPT-4o result of 64.7 replicated locally but Sonnet-3.5 actually scored 70.25/69.45 in my replications 🤔 Still well ahead of the other 2 though.",
"raw": "The GPT-4o result of 64.7 replicated locally but Sonnet-3.5 actually scored 70.25/69.45 in my replications 🤔 Still well ahead of the other 2 though.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Sammple of 1 of the eval calls here: ",
"raw": "Sammple of 1 of the eval calls here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://wandb.ai/morgan/MixEval/weave/calls/07b05ae2-2ef5-4525-98a6-c59963b76fe1",
"href": "https://wandb.ai/morgan/MixEval/weave/calls/07b05ae2-2ef5-4525-98a6-c59963b76fe1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Quick auto-logging tracing for openai-compatible clients and many more here: ",
"raw": "Quick auto-logging tracing for openai-compatible clients and many more here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://wandb.github.io/weave/quickstart/",
"href": "https://wandb.github.io/weave/quickstart/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Llama 3.1 405B Instruct beats GPT-4o on MixEval-Hard
Just ran MixEval for 405B, Sonnet-3.5 and 4o, with 405B landing right between the other two at 66.19
The GPT-4o result of 64.7 replicated locally but Sonnet-3.5 actually scored 70.25/69.45 in my replications 🤔 Still well ahead of the other 2 though.
Sammple of 1 of the eval calls here: https://wandb.ai/morgan/MixEval/weave/calls/07b05ae2-2ef5-4525-98a6-c59963b76fe1
Quick auto-logging tracing for openai-compatible clients and many more here: https://wandb.github.io/weave/quickstart/
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1618571183509-5f05a97d5d08220171a0ad9d.png",
"fullname": "Morgan McGuire",
"name": "morgan",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 18,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f05a97d5d08220171a0ad9d/gaQche2YCXq0TTnmmH2Ol.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f05a97d5d08220171a0ad9d/TbXxia4lQrX5KLU-6405Z.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f05a97d5d08220171a0ad9d/TdTwni8FlXwUvBwEdrXFC.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f05a97d5d08220171a0ad9d/r0Zz8rrXpvj0oQ6avJWNi.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/5f05a97d5d08220171a0ad9d/gxIoZoSeQFVMtnBcD08nK.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"zenosai",
"macsz",
"Corvius"
],
"count": 3
},
{
"reaction": "🔥",
"users": [
"Rohitkhatri75436"
],
"count": 1
}
] | 2024-07-24T11:27:03.000Z | 2024-07-24T11:27:03.893Z | [] | /posts/morgan/999630929802342 | 1,296 | 0 |
223487471308830 | [
{
"type": "text",
"value": "Hi HF community!🤗",
"raw": "Hi HF community!🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Hope y'all are as excited as me for the release of Llama 3.1! 🦙",
"raw": "Hope y'all are as excited as me for the release of Llama 3.1! 🦙",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Following the release, I built a space exploiting HF Inference API, thanks to a recipe you can find in this awesome GitHub repo (",
"raw": "Following the release, I built a space exploiting HF Inference API, thanks to a recipe you can find in this awesome GitHub repo (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/huggingface-llama-recipes/",
"href": "https://github.com/huggingface/huggingface-llama-recipes/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "): you can now run Llama-3.1-405B customizing its system instructions and other parameters, for free! 😇",
"raw": "): you can now run Llama-3.1-405B customizing its system instructions and other parameters, for free! 😇",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Follow this link: ",
"raw": "Follow this link: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/as-cle-bert/Llama-3.1-405B-FP8",
"href": null,
"resource": {
"type": "space",
"id": "as-cle-bert/Llama-3.1-405B-FP8",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/as-cle-bert/Llama-3.1-405B-FP8",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and let the fun begin!🍕",
"raw": " and let the fun begin!🍕",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hi HF community!🤗
Hope y'all are as excited as me for the release of Llama 3.1! 🦙
Following the release, I built a space exploiting HF Inference API, thanks to a recipe you can find in this awesome GitHub repo (https://github.com/huggingface/huggingface-llama-recipes/): you can now run Llama-3.1-405B customizing its system instructions and other parameters, for free! 😇
Follow this link: https://huggingface.co/spaces/as-cle-bert/Llama-3.1-405B-FP8 and let the fun begin!🍕 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg",
"fullname": "Astra Clelia Bertelli",
"name": "as-cle-bert",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 650,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"Nymbo",
"hg-crown",
"victor",
"Taylor658",
"osanseviero",
"louisbrulenaudet"
],
"count": 6
},
{
"reaction": "🤗",
"users": [
"Nymbo",
"osanseviero"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"alee5331",
"palash147"
],
"count": 2
},
{
"reaction": "🤯",
"users": [
"Ruaruaruabick"
],
"count": 1
}
] | 2024-07-23T23:24:33.000Z | 2024-07-25T05:13:22.273Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
}
] | /posts/as-cle-bert/223487471308830 | 2,599 | 1 |
518017846754230 | [
{
"type": "text",
"value": "JUST RELEASED: Fireplace 2 for Llama 3.1 8b Instruct!",
"raw": "JUST RELEASED: Fireplace 2 for Llama 3.1 8b Instruct!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Fireplace 2 is an 'expansion pack' of structured outputs you can request during your chat, using special request tokens to let Llama know you're looking for specific types of responses:",
"raw": "Fireplace 2 is an 'expansion pack' of structured outputs you can request during your chat, using special request tokens to let Llama know you're looking for specific types of responses:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Inline function calls",
"raw": "Inline function calls",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "SQL queries",
"raw": "SQL queries",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "JSON objects",
"raw": "JSON objects",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Data visualization with matplotlib",
"raw": "Data visualization with matplotlib",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Fireplace2",
"href": null,
"resource": {
"type": "model",
"id": "ValiantLabs/Llama3.1-8B-Fireplace2",
"discussionNum": null
},
"url": "https://huggingface.co/ValiantLabs/Llama3.1-8B-Fireplace2",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | JUST RELEASED: Fireplace 2 for Llama 3.1 8b Instruct!
Fireplace 2 is an 'expansion pack' of structured outputs you can request during your chat, using special request tokens to let Llama know you're looking for specific types of responses:
Inline function calls
SQL queries
JSON objects
Data visualization with matplotlib
https://huggingface.co/ValiantLabs/Llama3.1-8B-Fireplace2 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png",
"fullname": "t.d.a.g.",
"name": "sequelbox",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 51,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"victor",
"Taylor658",
"flflow"
],
"count": 3
}
] | 2024-07-23T22:46:18.000Z | 2024-07-27T12:50:05.046Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63444f2687964b331809eb55/WvZivsvKsM_t0tBtakovK.png",
"fullname": "t.d.a.g.",
"name": "sequelbox",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 51,
"isFollowing": false
},
{
"avatarUrl": "/avatars/4efbc5a76387d633144269181ee29b17.svg",
"fullname": "zouhair ",
"name": "zouhaor",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/sequelbox/518017846754230 | 1,328 | 2 |
833816740809851 | [
{
"type": "text",
"value": "Meta Researchers: How many compute hours should we use to train Llama 3.1?",
"raw": "Meta Researchers: How many compute hours should we use to train Llama 3.1?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mr. Zuck: Yes! 🤖💪",
"raw": "Mr. Zuck: Yes! 🤖💪",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Good folks at ",
"raw": "Good folks at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@AIatMeta",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "AIatMeta",
"label": null,
"lang": null
},
{
"type": "text",
"value": " did not just release the models but also published a 92-page detailed paper 📄 on their findings and technical aspects of the models and their training process!",
"raw": " did not just release the models but also published a 92-page detailed paper 📄 on their findings and technical aspects of the models and their training process!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Generally, we just gobble up these weights and forget the compute infrastructure used to train these models. 🖥️🚀",
"raw": "Generally, we just gobble up these weights and forget the compute infrastructure used to train these models. 🖥️🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Here are some interesting findings about the computing infrastructure of Llamas:",
"raw": "Here are some interesting findings about the computing infrastructure of Llamas:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Llama 1 and 2 models were trained on ",
"raw": "- Llama 1 and 2 models were trained on ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Meta",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Meta",
"label": null,
"lang": null
},
{
"type": "text",
"value": " 's AI Research SuperCluster. Llama 3 was migrated to Meta’s production clusters! 📊",
"raw": " 's AI Research SuperCluster. Llama 3 was migrated to Meta’s production clusters! 📊",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- That's 16,000 H100 GPUs, with each GPU featuring 700W TDP and 80GB HBM3, arranged in Meta’s Grand Teton AI server platform. 🖥️🔋",
"raw": "- That's 16,000 H100 GPUs, with each GPU featuring 700W TDP and 80GB HBM3, arranged in Meta’s Grand Teton AI server platform. 🖥️🔋",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- What about storing checkpoints? Used Tectonic, a distributed file system, for storage, with capacities reaching 240 PB and peak throughput of 7 TB/s. 💾📈",
"raw": "- What about storing checkpoints? Used Tectonic, a distributed file system, for storage, with capacities reaching 240 PB and peak throughput of 7 TB/s. 💾📈",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Meta's mad lads saved each GPU’s model state, ranging from 1 MB to 4 GB per GPU, for recovery and debugging. 🛠️🔍",
"raw": "- Meta's mad lads saved each GPU’s model state, ranging from 1 MB to 4 GB per GPU, for recovery and debugging. 🛠️🔍",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If this sounds big, well, they document the humungous challenges that come with it:",
"raw": "If this sounds big, well, they document the humungous challenges that come with it:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- In the 54-day training period, there were 466 job interruptions. 🕒🔄",
"raw": "- In the 54-day training period, there were 466 job interruptions. 🕒🔄",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- About 78% of unexpected interruptions were attributed to confirmed or suspected hardware issues. Mostly GPUs! 💥🖥️",
"raw": "- About 78% of unexpected interruptions were attributed to confirmed or suspected hardware issues. Mostly GPUs! 💥🖥️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Saving all checkpoints is cool until you do it for the 300B+ parameters model. The bursty nature of checkpoint writes, essential for state-saving during training, periodically saturated the storage fabric, impacting performance. 📉💾",
"raw": "- Saving all checkpoints is cool until you do it for the 300B+ parameters model. The bursty nature of checkpoint writes, essential for state-saving during training, periodically saturated the storage fabric, impacting performance. 📉💾",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- With all this, effective training time—measured as the time spent on useful training over the elapsed time—was higher than 90%. ⏱️📊",
"raw": "- With all this, effective training time—measured as the time spent on useful training over the elapsed time—was higher than 90%. ⏱️📊",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I think this is the stuff that movies can be made on! 🎬🌟",
"raw": "I think this is the stuff that movies can be made on! 🎬🌟",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://ai.meta.com/research/publications/the-llama-3-herd-of-models/",
"href": "https://ai.meta.com/research/publications/the-llama-3-herd-of-models/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Meta Researchers: How many compute hours should we use to train Llama 3.1?
Mr. Zuck: Yes! 🤖💪
Good folks at @AIatMeta did not just release the models but also published a 92-page detailed paper 📄 on their findings and technical aspects of the models and their training process!
Generally, we just gobble up these weights and forget the compute infrastructure used to train these models. 🖥️🚀
Here are some interesting findings about the computing infrastructure of Llamas:
- Llama 1 and 2 models were trained on @Meta 's AI Research SuperCluster. Llama 3 was migrated to Meta’s production clusters! 📊
- That's 16,000 H100 GPUs, with each GPU featuring 700W TDP and 80GB HBM3, arranged in Meta’s Grand Teton AI server platform. 🖥️🔋
- What about storing checkpoints? Used Tectonic, a distributed file system, for storage, with capacities reaching 240 PB and peak throughput of 7 TB/s. 💾📈
- Meta's mad lads saved each GPU’s model state, ranging from 1 MB to 4 GB per GPU, for recovery and debugging. 🛠️🔍
If this sounds big, well, they document the humungous challenges that come with it:
- In the 54-day training period, there were 466 job interruptions. 🕒🔄
- About 78% of unexpected interruptions were attributed to confirmed or suspected hardware issues. Mostly GPUs! 💥🖥️
- Saving all checkpoints is cool until you do it for the 300B+ parameters model. The bursty nature of checkpoint writes, essential for state-saving during training, periodically saturated the storage fabric, impacting performance. 📉💾
- With all this, effective training time—measured as the time spent on useful training over the elapsed time—was higher than 90%. ⏱️📊
I think this is the stuff that movies can be made on! 🎬🌟
Paper: https://ai.meta.com/research/publications/the-llama-3-herd-of-models/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 219,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/dqpSxD7BM-weUnMYRSGIC.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61e8c67cee1e1440121f0240/9sb__WsO5mwmdHHa6xKNc.jpeg",
"fullname": "Meta World Peace",
"name": "Meta",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5
}
] | [
{
"reaction": "🤯",
"users": [
"YaTharThShaRma999",
"ZachZimm",
"palash147",
"Ketansomewhere",
"cnmoro",
"GPT007",
"John6666",
"Ruaruaruabick",
"ajibawa-2023",
"adorkin",
"Chief-Inspector",
"louisbrulenaudet"
],
"count": 12
},
{
"reaction": "🤝",
"users": [
"bezir"
],
"count": 1
}
] | 2024-07-23T21:50:53.000Z | 2024-08-23T00:55:38.066Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 219,
"isFollowing": false
},
{
"avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg",
"fullname": "Tyrone Pierce",
"name": "piercyy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
}
] | /posts/singhsidhukuldeep/833816740809851 | 2,767 | 3 |
705580461777947 | [
{
"type": "text",
"value": "🚀 Exciting news! We've just launched \"Thundermoon\" - the latest version of Moondream, our open-source vision language model! 🌙",
"raw": "🚀 Exciting news! We've just launched \"Thundermoon\" - the latest version of Moondream, our open-source vision language model! 🌙",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key improvements in this release:",
"raw": "Key improvements in this release:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1. Massive leap in OCR capabilities",
"raw": "1. Massive leap in OCR capabilities",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2. Enhanced document understanding",
"raw": "2. Enhanced document understanding",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3. Significant boosts across key metrics:",
"raw": "3. Significant boosts across key metrics:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " * DocVQA: 61.9 (↑103%)",
"raw": " * DocVQA: 61.9 (↑103%)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " * TextVQA: 60.2 (↑5.2%)",
"raw": " * TextVQA: 60.2 (↑5.2%)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " * GQA: 64.9 (↑2.9%)",
"raw": " * GQA: 64.9 (↑2.9%)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "What does this mean? Moondream can now tackle complex document analysis tasks with unprecedented accuracy for a model of its size. From deciphering handwritten notes to interpreting data tables, the applications are vast.",
"raw": "What does this mean? Moondream can now tackle complex document analysis tasks with unprecedented accuracy for a model of its size. From deciphering handwritten notes to interpreting data tables, the applications are vast.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out the image for a glimpse of Moondream in action, effortlessly extracting insights from a 1944 sugar industry document!",
"raw": "Check out the image for a glimpse of Moondream in action, effortlessly extracting insights from a 1944 sugar industry document!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Why it matters:",
"raw": "Why it matters:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Democratizing AI: As an open-source project, we're making advanced vision AI accessible to all developers.",
"raw": "* Democratizing AI: As an open-source project, we're making advanced vision AI accessible to all developers.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Efficiency: Proving that smaller models can deliver big results.",
"raw": "* Efficiency: Proving that smaller models can deliver big results.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Real-world impact: From historical document analysis to modern business intelligence, the potential use cases are exciting.",
"raw": "* Real-world impact: From historical document analysis to modern business intelligence, the potential use cases are exciting.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Curious to try it out? Try out the live demo here! ",
"raw": "Curious to try it out? Try out the live demo here! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://moondream.ai/playground",
"href": "https://moondream.ai/playground",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀 Exciting news! We've just launched "Thundermoon" - the latest version of Moondream, our open-source vision language model! 🌙
Key improvements in this release:
1. Massive leap in OCR capabilities
2. Enhanced document understanding
3. Significant boosts across key metrics:
* DocVQA: 61.9 (↑103%)
* TextVQA: 60.2 (↑5.2%)
* GQA: 64.9 (↑2.9%)
What does this mean? Moondream can now tackle complex document analysis tasks with unprecedented accuracy for a model of its size. From deciphering handwritten notes to interpreting data tables, the applications are vast.
Check out the image for a glimpse of Moondream in action, effortlessly extracting insights from a 1944 sugar industry document!
Why it matters:
* Democratizing AI: As an open-source project, we're making advanced vision AI accessible to all developers.
* Efficiency: Proving that smaller models can deliver big results.
* Real-world impact: From historical document analysis to modern business intelligence, the potential use cases are exciting.
Curious to try it out? Try out the live demo here! https://moondream.ai/playground | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 375,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63117568fa95534e218da163/V0oHMWY9sul_4MUc11rgv.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"Tom-Neverwinter",
"catastropiyush",
"rAIfle",
"rbrisita",
"louisbrulenaudet",
"Csplk",
"iky1e"
],
"count": 8
},
{
"reaction": "👍",
"users": [
"ijohn07",
"sequelbox"
],
"count": 2
},
{
"reaction": "❤️",
"users": [
"Csplk",
"twenkid"
],
"count": 2
}
] | 2024-07-23T21:24:02.000Z | 2024-08-15T00:18:09.937Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 375,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6344a1b0762379fc63017e62/g4VIT8l2lZIj6AoQAwVy7.png",
"fullname": "John",
"name": "cmp-nct",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 30,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c6bbbaa1178286a7f8ea418837a6b330.svg",
"fullname": "mnemic",
"name": "mnemic",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
}
] | /posts/vikhyatk/705580461777947 | 3,228 | 4 |
806846801467023 | [
{
"type": "text",
"value": "I just had a masterclass in open-source collaboration with the release of Llama 3.1 🦙🤗",
"raw": "I just had a masterclass in open-source collaboration with the release of Llama 3.1 🦙🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Meta dropped Llama 3.1, and seeing firsthand the Hugging Face team working to integrate it is nothing short of impressive. Their swift integration, comprehensive documentation, and innovative tools showcase the power of open-source teamwork. ",
"raw": "Meta dropped Llama 3.1, and seeing firsthand the Hugging Face team working to integrate it is nothing short of impressive. Their swift integration, comprehensive documentation, and innovative tools showcase the power of open-source teamwork. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "For the curious minds:",
"raw": "For the curious minds:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📊 Check out independent evaluations: ",
"raw": "📊 Check out independent evaluations: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard",
"href": null,
"resource": {
"type": "space",
"id": "open-llm-leaderboard/open_llm_leaderboard",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🧠 Deep dive into the tech: ",
"raw": "🧠 Deep dive into the tech: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/llama31",
"href": "https://huggingface.co/blog/llama31",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👨🍳 Try different recipes (including running 8B on free Colab!): ",
"raw": "👨🍳 Try different recipes (including running 8B on free Colab!): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/huggingface/huggingface-llama-recipes",
"href": "https://github.com/huggingface/huggingface-llama-recipes",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📈 Visualize open vs. closed LLM progress: ",
"raw": "📈 Visualize open vs. closed LLM progress: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/andrewrreed/closed-vs-open-arena-elo",
"href": null,
"resource": {
"type": "space",
"id": "andrewrreed/closed-vs-open-arena-elo",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/andrewrreed/closed-vs-open-arena-elo",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤖 Generate synthetic data with distilabel, thanks to the new license allowing the use of outputs to train other LLMs ",
"raw": "🤖 Generate synthetic data with distilabel, thanks to the new license allowing the use of outputs to train other LLMs ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/llama31#synthetic-data-generation-with-distilabel",
"href": "https://huggingface.co/blog/llama31#synthetic-data-generation-with-distilabel",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💡 Pro tip: Experience the 405B version for free on HuggingChat, now with tool-calling capabilities! ",
"raw": "💡 Pro tip: Experience the 405B version for free on HuggingChat, now with tool-calling capabilities! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/chat/",
"href": "https://huggingface.co/chat/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "#OpenSourceAI #AIInnovation",
"raw": "#OpenSourceAI #AIInnovation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I just had a masterclass in open-source collaboration with the release of Llama 3.1 🦙🤗
Meta dropped Llama 3.1, and seeing firsthand the Hugging Face team working to integrate it is nothing short of impressive. Their swift integration, comprehensive documentation, and innovative tools showcase the power of open-source teamwork.
For the curious minds:
📊 Check out independent evaluations: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard
🧠 Deep dive into the tech: https://huggingface.co/blog/llama31
👨🍳 Try different recipes (including running 8B on free Colab!): https://github.com/huggingface/huggingface-llama-recipes
📈 Visualize open vs. closed LLM progress: https://huggingface.co/spaces/andrewrreed/closed-vs-open-arena-elo
🤖 Generate synthetic data with distilabel, thanks to the new license allowing the use of outputs to train other LLMs https://huggingface.co/blog/llama31#synthetic-data-generation-with-distilabel
💡 Pro tip: Experience the 405B version for free on HuggingChat, now with tool-calling capabilities! https://huggingface.co/chat/
#OpenSourceAI #AIInnovation | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/QBCQ3w4pegjRLDsa952DV.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"clem",
"gastonmorixe",
"victor",
"chuangxinlezhi",
"Utha",
"R-I-0816",
"fluxthedev",
"osanseviero"
],
"count": 8
},
{
"reaction": "❤️",
"users": [
"clboetticher",
"chuangxinlezhi",
"Utha",
"R-I-0816",
"osanseviero"
],
"count": 5
},
{
"reaction": "🤝",
"users": [
"chuangxinlezhi",
"R-I-0816",
"osanseviero"
],
"count": 3
}
] | 2024-07-23T20:05:45.000Z | 2024-07-23T20:31:02.001Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1763,
"isFollowing": false
}
] | /posts/fdaudens/806846801467023 | 1,951 | 1 |
287713441577435 | [
{
"type": "text",
"value": "Any idea if this \"scheduled\"/\"dynamic\" batch size is available in HF Trainers ? I've never seen it personally",
"raw": "Any idea if this \"scheduled\"/\"dynamic\" batch size is available in HF Trainers ? I've never seen it personally",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Any idea if this "scheduled"/"dynamic" batch size is available in HF Trainers ? I've never seen it personally | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/626237d9bbcbd1c34f1bb231/EJrOjvAL-68qMCYdnvOrq.png",
"fullname": "Ali El Filali",
"name": "alielfilali01",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 186,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/626237d9bbcbd1c34f1bb231/s5tGAm8jlvTzYlRraGIMY.png"
}
] | [] | [] | 2024-07-23T18:06:22.000Z | 2024-07-23T18:06:22.315Z | [] | /posts/alielfilali01/287713441577435 | 719 | 0 |
371684391877712 | [
{
"type": "text",
"value": "🚨 Launching The Visual Haystacks (VHs) Benchmark: the first \"visual-centric\" Needle-In-A-Haystack (NIAH) benchmark to assess LMMs' capability in long-context visual retrieval and reasoning. ",
"raw": "🚨 Launching The Visual Haystacks (VHs) Benchmark: the first \"visual-centric\" Needle-In-A-Haystack (NIAH) benchmark to assess LMMs' capability in long-context visual retrieval and reasoning. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check it out!",
"raw": "Check it out!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/tsunghanwu/visual_haystacks",
"href": null,
"resource": {
"type": "dataset",
"id": "tsunghanwu/visual_haystacks",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/tsunghanwu/visual_haystacks",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://visual-haystacks.github.io/",
"href": "https://visual-haystacks.github.io/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2407.13766",
"href": "https://arxiv.org/abs/2407.13766",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/visual-haystacks/vhs_benchmark",
"href": "https://github.com/visual-haystacks/vhs_benchmark",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚨 Launching The Visual Haystacks (VHs) Benchmark: the first "visual-centric" Needle-In-A-Haystack (NIAH) benchmark to assess LMMs' capability in long-context visual retrieval and reasoning.
Check it out!
https://huggingface.co/datasets/tsunghanwu/visual_haystacks
https://visual-haystacks.github.io/
https://arxiv.org/abs/2407.13766
https://github.com/visual-haystacks/vhs_benchmark | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669920369989-noauth.jpeg",
"fullname": "David Chan",
"name": "davidchan",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 6,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"osanseviero"
],
"count": 1
}
] | 2024-07-23T15:15:27.000Z | 2024-07-23T15:15:27.067Z | [] | /posts/davidchan/371684391877712 | 542 | 0 |
213182541433936 | [
{
"type": "text",
"value": "I just launched an exciting new multiplayer app powered by GPT-4o, enabling collaborative AI-driven queries in a single shared session! ",
"raw": "I just launched an exciting new multiplayer app powered by GPT-4o, enabling collaborative AI-driven queries in a single shared session! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "### 🔗 Try It Out! 👉 Check out the GPT-4o Multiplayer App ",
"raw": "### 🔗 Try It Out! 👉 Check out the GPT-4o Multiplayer App ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Experience the future of collaborative AI by visiting our space on Hugging Face: ",
"raw": "Experience the future of collaborative AI by visiting our space on Hugging Face: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/awacke1/ChatStreamlitMultiplayer",
"href": null,
"resource": {
"type": "space",
"id": "awacke1/ChatStreamlitMultiplayer",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/awacke1/ChatStreamlitMultiplayer",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🎉 This innovative tool lets you and your team reason over:",
"raw": "🎉 This innovative tool lets you and your team reason over:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "###📝 Text",
"raw": "###📝 Text",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "###🖼️ Image",
"raw": "###🖼️ Image",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "###🎵 Audio",
"raw": "###🎵 Audio",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "###🎥 Video",
"raw": "###🎥 Video",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "## 🔍 Key Features",
"raw": "## 🔍 Key Features",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "### Shared Contributions",
"raw": "### Shared Contributions",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Collaborate in real-time, seeing each other's inputs and contributions.",
"raw": "Collaborate in real-time, seeing each other's inputs and contributions.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Enhances teamwork and fosters a collective approach to problem-solving.",
"raw": "Enhances teamwork and fosters a collective approach to problem-solving.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "### Diverse Media Integration",
"raw": "### Diverse Media Integration",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Seamlessly analyze and reason with text, images, audio, and video.",
"raw": "Seamlessly analyze and reason with text, images, audio, and video.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Breakthrough capabilities in handling complex media types, including air traffic control images and audio.",
"raw": "Breakthrough capabilities in handling complex media types, including air traffic control images and audio.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "## 🛠️ Real-World Testing",
"raw": "## 🛠️ Real-World Testing",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This morning, we tested the app using images and audio from air traffic control—a challenge that was nearly impossible to handle with ease just a few years ago. 🚁💬",
"raw": "This morning, we tested the app using images and audio from air traffic control—a challenge that was nearly impossible to handle with ease just a few years ago. 🚁💬",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌱 The Future of AI Collaboration",
"raw": "🌱 The Future of AI Collaboration",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We believe AI Pair Programming is evolving into a new era of intelligence through shared contributions and teamwork. As we continue to develop, this app will enable groups to:",
"raw": "We believe AI Pair Programming is evolving into a new era of intelligence through shared contributions and teamwork. As we continue to develop, this app will enable groups to:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Generate detailed text responses 📝",
"raw": "Generate detailed text responses 📝",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Collaborate on code responses 💻",
"raw": "Collaborate on code responses 💻",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Develop new AI programs together 🤖",
"raw": "Develop new AI programs together 🤖",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I just launched an exciting new multiplayer app powered by GPT-4o, enabling collaborative AI-driven queries in a single shared session!
### 🔗 Try It Out! 👉 Check out the GPT-4o Multiplayer App
Experience the future of collaborative AI by visiting our space on Hugging Face: https://huggingface.co/spaces/awacke1/ChatStreamlitMultiplayer
🎉 This innovative tool lets you and your team reason over:
###📝 Text
###🖼️ Image
###🎵 Audio
###🎥 Video
## 🔍 Key Features
### Shared Contributions
Collaborate in real-time, seeing each other's inputs and contributions.
Enhances teamwork and fosters a collective approach to problem-solving.
### Diverse Media Integration
Seamlessly analyze and reason with text, images, audio, and video.
Breakthrough capabilities in handling complex media types, including air traffic control images and audio.
## 🛠️ Real-World Testing
This morning, we tested the app using images and audio from air traffic control—a challenge that was nearly impossible to handle with ease just a few years ago. 🚁💬
🌱 The Future of AI Collaboration
We believe AI Pair Programming is evolving into a new era of intelligence through shared contributions and teamwork. As we continue to develop, this app will enable groups to:
Generate detailed text responses 📝
Collaborate on code responses 💻
Develop new AI programs together 🤖 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1656147940537-620630b603825909dcbeba35.jpeg",
"fullname": "Aaron C Wacker",
"name": "awacke1",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 185,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"victor"
],
"count": 1
}
] | 2024-07-23T11:34:36.000Z | 2024-07-23T11:34:36.131Z | [] | /posts/awacke1/213182541433936 | 1,342 | 0 |
546261052602944 | [
{
"type": "text",
"value": "We ",
"raw": "We ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://mii-llm.ai",
"href": "https://mii-llm.ai",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " just released a new LLM Italian benchmark and a set of evaluation: MMLU-PRO-ITA",
"raw": " just released a new LLM Italian benchmark and a set of evaluation: MMLU-PRO-ITA",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thanks to ",
"raw": "Thanks to ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@efederici",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "efederici",
"label": null,
"lang": null
},
{
"type": "text",
"value": " who released ",
"raw": " who released ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/efederici/MMLU-Pro-ita",
"href": null,
"resource": {
"type": "dataset",
"id": "efederici/MMLU-Pro-ita",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/efederici/MMLU-Pro-ita",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " a machine translated version of MMLU-PRO and thanks to a community shared computational effort we published in the \"Eval Aggiuntive\" tab of ",
"raw": " a machine translated version of MMLU-PRO and thanks to a community shared computational effort we published in the \"Eval Aggiuntive\" tab of ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/spaces/FinancialSupport/open_ita_llm_leaderboard",
"href": "https://huggingface.co/spaces/FinancialSupport/open_ita_llm_leaderboard",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " the results on Italian open source LLMs. ",
"raw": " the results on Italian open source LLMs. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you want to deepen read the blog article on hf ",
"raw": "If you want to deepen read the blog article on hf ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/giux78/mmlu-pro-ita",
"href": "https://huggingface.co/blog/giux78/mmlu-pro-ita",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | We https://mii-llm.ai just released a new LLM Italian benchmark and a set of evaluation: MMLU-PRO-ITA
Thanks to @efederici who released https://huggingface.co/datasets/efederici/MMLU-Pro-ita a machine translated version of MMLU-PRO and thanks to a community shared computational effort we published in the "Eval Aggiuntive" tab of https://huggingface.co/spaces/FinancialSupport/open_ita_llm_leaderboard the results on Italian open source LLMs.
If you want to deepen read the blog article on hf https://huggingface.co/blog/giux78/mmlu-pro-ita | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5fef4eb7770b06e11c2c6381/1NMdigjCGtn0yvQZSi5NJ.png",
"fullname": "Alessandro Ercolani",
"name": "giux78",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 44,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/612246596d9ce900691744d2/9DlHVQDqblKz7QPTA6nDa.jpeg",
"fullname": "Edoardo Federici",
"name": "efederici",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 28
}
] | [
{
"reaction": "🚀",
"users": [
"giux78",
"Ramikan-BR",
"osanseviero"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"Ramikan-BR",
"anakin87"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"Ramikan-BR"
],
"count": 1
},
{
"reaction": "👀",
"users": [
"Ramikan-BR"
],
"count": 1
},
{
"reaction": "😔",
"users": [
"ZeroWw"
],
"count": 1
}
] | 2024-07-23T10:16:30.000Z | 2024-07-23T10:16:30.860Z | [] | /posts/giux78/546261052602944 | 1,639 | 0 |
537873019611478 | [
{
"type": "text",
"value": "[New tool] Follow interesting ML persons 👩🎨 👨🎤 👩🏫 with Followgraph",
"raw": "[New tool] Follow interesting ML persons 👩🎨 👨🎤 👩🏫 with Followgraph",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/severo/followgraph",
"href": null,
"resource": {
"type": "space",
"id": "severo/followgraph",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/severo/followgraph",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Please try it and tell me if it helped you discover high-quality content 👍 👎",
"raw": "Please try it and tell me if it helped you discover high-quality content 👍 👎",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I repurposed \"Followgraph for Mastodon\" (",
"raw": "I repurposed \"Followgraph for Mastodon\" (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://followgraph.vercel.app/",
"href": "https://followgraph.vercel.app/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ").",
"raw": ").",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "My new follows: ",
"raw": "My new follows: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@TheBloke",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "TheBloke",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@mlabonne",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "mlabonne",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@teknium",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "teknium",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@KnutJaegersberg",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "KnutJaegersberg",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@SkalskiP",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "SkalskiP",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@AmelieSchreiber",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "AmelieSchreiber",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@lbourdois",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "lbourdois",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ceyda",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ceyda",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@andrewyng",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "andrewyng",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Pclanglais",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Pclanglais",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@karpathy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "karpathy",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "And you?",
"raw": "And you?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | [New tool] Follow interesting ML persons 👩🎨 👨🎤 👩🏫 with Followgraph
https://huggingface.co/spaces/severo/followgraph
Please try it and tell me if it helped you discover high-quality content 👍 👎
I repurposed "Followgraph for Mastodon" (https://followgraph.vercel.app/).
My new follows: @TheBloke @mlabonne @teknium @KnutJaegersberg @SkalskiP @AmelieSchreiber @lbourdois @ceyda @andrewyng @Pclanglais @karpathy
And you? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60a76b174e24361791fe822d/inEvYwrd4z0xvRQN3ikdE.jpeg",
"fullname": "Sylvain Lesage",
"name": "severo",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 129,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60a76b174e24361791fe822d/iu1G6M41TwBhAbaXTgFQj.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64191ec8d459c9e7fbb0236b/7BeTgySZzmFCaVpntaYgP.jpeg",
"fullname": "Amelie Schreiber",
"name": "AmelieSchreiber",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 737
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6467fcf0946476c5d2194c14/zAy7PYR3HkC9NWcpZw8X1.png",
"fullname": "Andrew Ng",
"name": "andrewyng",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 178
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1623134857336-5f7c2cbbb1a525442ff96e39.jpeg",
"fullname": "Ceyda Cinarel",
"name": "ceyda",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 80
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1660434061546-62f83661fe21cc4875221c0f.jpeg",
"fullname": "Andrej K",
"name": "karpathy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 476
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg",
"fullname": "Knut Jägersberg",
"name": "KnutJaegersberg",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 238
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/613b0a62a14099d5afed7830/pLuqSIYaNYhUqdjxlNrFn.png",
"fullname": "Loïck BOURDOIS",
"name": "lbourdois",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 90
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png",
"fullname": "Maxime Labonne",
"name": "mlabonne",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 3486
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64ce091a9e9ca8123d7a42b0/OEPggp82RwigxNLL35LgT.jpeg",
"fullname": "Pierre-Carl Langlais",
"name": "Pclanglais",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60f84d4d85dbbb185d2e9a53/Mlc0XjAgQR2cuhGNchz07.jpeg",
"fullname": "Piotr Skalski",
"name": "SkalskiP",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 2318
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6317aade83d8d2fd903192d9/erOwgMXc_CZih3uMoyTAp.jpeg",
"fullname": "Teknium",
"name": "teknium",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4267
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg",
"fullname": "Tom Jobbins",
"name": "TheBloke",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 22212
}
] | [
{
"reaction": "🚀",
"users": [
"gokaygokay",
"Ramikan-BR",
"mmhamdy",
"lhoestq",
"appvoid",
"kramp",
"merve",
"mikestaub",
"osanseviero",
"lbourdois",
"louisbrulenaudet",
"ZennyKenny"
],
"count": 12
},
{
"reaction": "❤️",
"users": [
"mlabonne",
"Ramikan-BR",
"mmhamdy",
"lhoestq",
"merve",
"mikestaub",
"clem",
"osanseviero",
"lbourdois",
"efecelik"
],
"count": 10
}
] | 2024-07-23T10:07:25.000Z | 2024-07-31T08:19:18.768Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b8e2ba285851687028d395/JtUGAwVh_4cDEsjNcfpye.png",
"fullname": "Maxime Labonne",
"name": "mlabonne",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 3486,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg",
"fullname": "appvoid",
"name": "appvoid",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 35,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60a76b174e24361791fe822d/inEvYwrd4z0xvRQN3ikdE.jpeg",
"fullname": "Sylvain Lesage",
"name": "severo",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 129,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg",
"fullname": "Louis Brulé Naudet",
"name": "louisbrulenaudet",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 174,
"isFollowing": false
}
] | /posts/severo/537873019611478 | 3,438 | 5 |
347522688239080 | [
{
"type": "text",
"value": "🙋🏻♂️ Hey there folks ",
"raw": "🙋🏻♂️ Hey there folks ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "made a demo for Nvidia Minitron on an A100. ",
"raw": "made a demo for Nvidia Minitron on an A100. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's Nemotron-4 15B model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models.",
"raw": "Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's Nemotron-4 15B model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Deriving the Minitron 8B and 4B models from the base 15B model using our approach requires up to 40x fewer training tokens per model compared to training from scratch; this results in compute cost savings of 1.8x for training the full model family (15B, 8B, and 4B). Minitron models exhibit up to a 16% improvement in MMLU scores compared to training from scratch, perform comparably to other community models such as Mistral 7B, Gemma 7B and Llama-3 8B, and outperform state-of-the-art compression techniques from the literature. Please refer to our arXiv paper for more details.",
"raw": "Deriving the Minitron 8B and 4B models from the base 15B model using our approach requires up to 40x fewer training tokens per model compared to training from scratch; this results in compute cost savings of 1.8x for training the full model family (15B, 8B, and 4B). Minitron models exhibit up to a 16% improvement in MMLU scores compared to training from scratch, perform comparably to other community models such as Mistral 7B, Gemma 7B and Llama-3 8B, and outperform state-of-the-art compression techniques from the literature. Please refer to our arXiv paper for more details.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Minitron models are for research and development only.",
"raw": "Minitron models are for research and development only.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "source : ",
"raw": "source : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/nvidia/Minitron-8B-Base",
"href": null,
"resource": {
"type": "model",
"id": "nvidia/Minitron-8B-Base",
"discussionNum": null
},
"url": "https://huggingface.co/nvidia/Minitron-8B-Base",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "demo : ",
"raw": "demo : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Tonic/Minitron",
"href": null,
"resource": {
"type": "space",
"id": "Tonic/Minitron",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Tonic/Minitron",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🙋🏻♂️ Hey there folks
made a demo for Nvidia Minitron on an A100.
Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's Nemotron-4 15B model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models.
Deriving the Minitron 8B and 4B models from the base 15B model using our approach requires up to 40x fewer training tokens per model compared to training from scratch; this results in compute cost savings of 1.8x for training the full model family (15B, 8B, and 4B). Minitron models exhibit up to a 16% improvement in MMLU scores compared to training from scratch, perform comparably to other community models such as Mistral 7B, Gemma 7B and Llama-3 8B, and outperform state-of-the-art compression techniques from the literature. Please refer to our arXiv paper for more details.
Minitron models are for research and development only.
source : https://huggingface.co/nvidia/Minitron-8B-Base
demo : https://huggingface.co/spaces/Tonic/Minitron | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg",
"fullname": "Joseph [open/acc] Pollack",
"name": "Tonic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 313,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"nicolay-r",
"TuringsSolutions",
"ZeroWw",
"clem",
"Nymbo",
"osanseviero",
"louisbrulenaudet",
"Tonic"
],
"count": 8
},
{
"reaction": "❤️",
"users": [
"clem",
"osanseviero"
],
"count": 2
}
] | 2024-07-23T09:55:50.000Z | 2024-07-23T18:21:10.710Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6569216f9c96f1a47bf45788/mCLqmAs4dOjKdxNQVAp1w.png",
"fullname": "Sica Rius",
"name": "SicariusSicariiStuff",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 135,
"isFollowing": false
}
] | /posts/Tonic/347522688239080 | 1,716 | 1 |
608490648185683 | [
{
"type": "text",
"value": "🔥Thrilled to release our 8B version of Symbol-LLM-Instruct ! ",
"raw": "🔥Thrilled to release our 8B version of Symbol-LLM-Instruct ! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It follows the two-stage training strategy proposed in the original paper and is continually optimized on LLaMA3-Chat-8B model.",
"raw": "It follows the two-stage training strategy proposed in the original paper and is continually optimized on LLaMA3-Chat-8B model.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Symbol-LLM was accepted by ACL'24 main conference ! See you in Thailand !",
"raw": "Symbol-LLM was accepted by ACL'24 main conference ! See you in Thailand !",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper link: ",
"raw": "Paper link: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2311.09278",
"href": "https://arxiv.org/abs/2311.09278",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper Title: Symbol-LLM: Towards Foundational Symbol-centric Interface For Large Language Models",
"raw": "Paper Title: Symbol-LLM: Towards Foundational Symbol-centric Interface For Large Language Models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🔥Thrilled to release our 8B version of Symbol-LLM-Instruct !
It follows the two-stage training strategy proposed in the original paper and is continually optimized on LLaMA3-Chat-8B model.
Symbol-LLM was accepted by ACL'24 main conference ! See you in Thailand !
Paper link: https://arxiv.org/abs/2311.09278
Paper Title: Symbol-LLM: Towards Foundational Symbol-centric Interface For Large Language Models
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/656d73ed0bbc114fe6449704/gpteBU9GmKSHRVkRBUHld.png",
"fullname": "Symbol-LLM",
"name": "Symbol-LLM",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 29,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"Symbol-LLM",
"Xdotnet",
"Ramdevkijai",
"louisbrulenaudet",
"osanseviero"
],
"count": 5
},
{
"reaction": "🔥",
"users": [
"nicolay-r",
"ToKrCZ",
"osanseviero"
],
"count": 3
},
{
"reaction": "🤗",
"users": [
"Symbol-LLM"
],
"count": 1
},
{
"reaction": "😎",
"users": [
"ZeroWw"
],
"count": 1
}
] | 2024-07-23T08:19:31.000Z | 2024-07-25T20:50:47.254Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868,
"isFollowing": false
}
] | /posts/Symbol-LLM/608490648185683 | 2,117 | 1 |
309759264789106 | [
{
"type": "text",
"value": "How to create custom LLMs from scratch",
"raw": "How to create custom LLMs from scratch",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "See my new podcast on this topic, at ",
"raw": "See my new podcast on this topic, at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://mltblog.com/3xS1bf5",
"href": "https://mltblog.com/3xS1bf5",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Despite GPT, Claude, Gemini, LLama and the other host of LLMs that we have access to, a variety of organizations are still exploring their options when it comes to custom LLMs. Logging in to ChatGPT is easy enough, and so is creating a ‘custom’ openAI GPT, but what does it take to create a truly custom LLM? When and why might this be useful, and will it be worth the effort?",
"raw": "Despite GPT, Claude, Gemini, LLama and the other host of LLMs that we have access to, a variety of organizations are still exploring their options when it comes to custom LLMs. Logging in to ChatGPT is easy enough, and so is creating a ‘custom’ openAI GPT, but what does it take to create a truly custom LLM? When and why might this be useful, and will it be worth the effort?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | How to create custom LLMs from scratch
See my new podcast on this topic, at https://mltblog.com/3xS1bf5
Despite GPT, Claude, Gemini, LLama and the other host of LLMs that we have access to, a variety of organizations are still exploring their options when it comes to custom LLMs. Logging in to ChatGPT is easy enough, and so is creating a ‘custom’ openAI GPT, but what does it take to create a truly custom LLM? When and why might this be useful, and will it be worth the effort? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png",
"fullname": "Vincent Granville",
"name": "vincentg64",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/669c89e98f2dbc203f9e74ab/0MywN8McYQVfu2fXSn_Mm.png"
}
] | [] | [
{
"reaction": "🚀",
"users": [
"nicolay-r",
"victor",
"SFBAI"
],
"count": 3
}
] | 2024-07-23T06:10:05.000Z | 2024-07-23T06:10:26.640Z | [] | /posts/vincentg64/309759264789106 | 1,235 | 0 |
111964148485189 | [
{
"type": "text",
"value": "LazyLLM - Unusual Colab (Apple & Meta) Yields Impactful Work",
"raw": "LazyLLM - Unusual Colab (Apple & Meta) Yields Impactful Work",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "LLM inference typically consists of two stages: prefilling/tokenizing and decoding. In the prefilling stage, the model processes the entire input prompt, computing and caching key-value (KV) pairs for each token, which can be time-consuming for long prompts. This is followed by the decoding stage, where the model generates tokens sequentially, reusing the cached KVs. ",
"raw": "LLM inference typically consists of two stages: prefilling/tokenizing and decoding. In the prefilling stage, the model processes the entire input prompt, computing and caching key-value (KV) pairs for each token, which can be time-consuming for long prompts. This is followed by the decoding stage, where the model generates tokens sequentially, reusing the cached KVs. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "LazyLLM introduces a dynamic token pruning technique. Instead of computing KVs for all tokens during prefilling, LazyLLM selectively processes only the most important tokens based on attention scores, deferring less important ones to later steps if needed. It uses progressive token pruning across transformer layers and introduces an Aux Cache to store hidden states of pruned tokens. ",
"raw": "LazyLLM introduces a dynamic token pruning technique. Instead of computing KVs for all tokens during prefilling, LazyLLM selectively processes only the most important tokens based on attention scores, deferring less important ones to later steps if needed. It uses progressive token pruning across transformer layers and introduces an Aux Cache to store hidden states of pruned tokens. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This approach significantly reduces the time-to-first-token (TTFT) and overall generation time while maintaining accuracy across various tasks. LazyLLM outperforms baseline techniques like random token dropping and static pruning, and can be easily integrated into existing LLMs without fine-tuning, offering a practical solution for accelerating LLM inference, especially in long context scenarios.",
"raw": "This approach significantly reduces the time-to-first-token (TTFT) and overall generation time while maintaining accuracy across various tasks. LazyLLM outperforms baseline techniques like random token dropping and static pruning, and can be easily integrated into existing LLMs without fine-tuning, offering a practical solution for accelerating LLM inference, especially in long context scenarios.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "IN SIMPLE TERMS",
"raw": "IN SIMPLE TERMS",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "When you prompt a large language model (LLM), it usually looks at every single word/subword (or tokens) in your prompt before generating a response. This can be time consuming, especially for prompts with very long texts. This paper introduces a new technique that solves this problem by being more selective. Instead of looking at every word right away, it only focuses on the most important words first. It decides which words are important based on how much attention the model gives them. If it needs other words later, it can go back and look at them then. This approach is like skimming a text for key information before reading it in detail.",
"raw": "When you prompt a large language model (LLM), it usually looks at every single word/subword (or tokens) in your prompt before generating a response. This can be time consuming, especially for prompts with very long texts. This paper introduces a new technique that solves this problem by being more selective. Instead of looking at every word right away, it only focuses on the most important words first. It decides which words are important based on how much attention the model gives them. If it needs other words later, it can go back and look at them then. This approach is like skimming a text for key information before reading it in detail.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Read More: ",
"raw": "Read More: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/pdf/2407.14057",
"href": "https://arxiv.org/pdf/2407.14057",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | LazyLLM - Unusual Colab (Apple & Meta) Yields Impactful Work
LLM inference typically consists of two stages: prefilling/tokenizing and decoding. In the prefilling stage, the model processes the entire input prompt, computing and caching key-value (KV) pairs for each token, which can be time-consuming for long prompts. This is followed by the decoding stage, where the model generates tokens sequentially, reusing the cached KVs.
LazyLLM introduces a dynamic token pruning technique. Instead of computing KVs for all tokens during prefilling, LazyLLM selectively processes only the most important tokens based on attention scores, deferring less important ones to later steps if needed. It uses progressive token pruning across transformer layers and introduces an Aux Cache to store hidden states of pruned tokens.
This approach significantly reduces the time-to-first-token (TTFT) and overall generation time while maintaining accuracy across various tasks. LazyLLM outperforms baseline techniques like random token dropping and static pruning, and can be easily integrated into existing LLMs without fine-tuning, offering a practical solution for accelerating LLM inference, especially in long context scenarios.
IN SIMPLE TERMS
When you prompt a large language model (LLM), it usually looks at every single word/subword (or tokens) in your prompt before generating a response. This can be time consuming, especially for prompts with very long texts. This paper introduces a new technique that solves this problem by being more selective. Instead of looking at every word right away, it only focuses on the most important words first. It decides which words are important based on how much attention the model gives them. If it needs other words later, it can go back and look at them then. This approach is like skimming a text for key information before reading it in detail.
Read More: https://arxiv.org/pdf/2407.14057 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/3yYj8D7xtL1rgi_KHs-aO.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/IryreY8BXH2QmGyvAkE8k.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/zvvrP_VO4qFr3dKngJ81O.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/5_TduEda-J9JIRc0ABbKC.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/0WMekl3H9Wnizay2h0k3x.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"ZeroWw",
"nicolay-r",
"joelbryan",
"osanseviero"
],
"count": 4
}
] | 2024-07-23T01:16:16.000Z | 2024-07-23T19:22:26.671Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
}
] | /posts/Jaward/111964148485189 | 1,345 | 1 |
705751692421448 | [
{
"type": "text",
"value": "SNN Image Diffusion V2",
"raw": "SNN Image Diffusion V2",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Billionaires have been made for less than this. This is only one of the things it can it do. It can do API calls, function calls, optimize poker and blackjack odds, anything that is an optimization problem. It costs fractions of a penny and requires fractions of the compute of an LLM model. It can even communicate two ways with an LLM model.",
"raw": "Billionaires have been made for less than this. This is only one of the things it can it do. It can do API calls, function calls, optimize poker and blackjack odds, anything that is an optimization problem. It costs fractions of a penny and requires fractions of the compute of an LLM model. It can even communicate two ways with an LLM model.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | SNN Image Diffusion V2
Billionaires have been made for less than this. This is only one of the things it can it do. It can do API calls, function calls, optimize poker and blackjack odds, anything that is an optimization problem. It costs fractions of a penny and requires fractions of the compute of an LLM model. It can even communicate two ways with an LLM model.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/efHgrIMNS0ICfq7mLdP_T.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"zikazach",
"Xdotnet",
"nicolay-r"
],
"count": 3
}
] | 2024-07-22T23:46:21.000Z | 2024-07-23T21:46:14.484Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg",
"fullname": "Firstname Lastname",
"name": "takeraparterer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 29,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
},
{
"avatarUrl": "/avatars/0087f207c06a793c55ed0489ff793e70.svg",
"fullname": "nicolo",
"name": "nicolollo",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/TuringsSolutions/705751692421448 | 1,379 | 19 |
624836925605809 | [
{
"type": "text",
"value": "Hi HuggingFacers!🤗",
"raw": "Hi HuggingFacers!🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Good news concerning ",
"raw": "Good news concerning ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/as-cle-bert/smolLM-arena",
"href": null,
"resource": {
"type": "space",
"id": "as-cle-bert/smolLM-arena",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/as-cle-bert/smolLM-arena",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ", the chat arena where you can compare some of the Small Language Models (<1.7B) on the Hub and cast your vote to choose the best!📱",
"raw": ", the chat arena where you can compare some of the Small Language Models (<1.7B) on the Hub and cast your vote to choose the best!📱",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The space now has a new interface with chatbots instead of textboxs, it runs faster and it also comes with usage instructions :)",
"raw": "The space now has a new interface with chatbots instead of textboxs, it runs faster and it also comes with usage instructions :)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Have fun!🍕",
"raw": "Have fun!🍕",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hi HuggingFacers!🤗
Good news concerning https://huggingface.co/spaces/as-cle-bert/smolLM-arena, the chat arena where you can compare some of the Small Language Models (<1.7B) on the Hub and cast your vote to choose the best!📱
The space now has a new interface with chatbots instead of textboxs, it runs faster and it also comes with usage instructions :)
Have fun!🍕 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg",
"fullname": "Astra Clelia Bertelli",
"name": "as-cle-bert",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 650,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"prithivMLmods",
"nicolay-r",
"Ramikan-BR",
"cahlen",
"osanseviero",
"quyettv",
"louisbrulenaudet"
],
"count": 7
},
{
"reaction": "🚀",
"users": [
"Ramikan-BR",
"Felladrin"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"Ramikan-BR"
],
"count": 1
},
{
"reaction": "👀",
"users": [
"Ramikan-BR"
],
"count": 1
}
] | 2024-07-22T21:39:50.000Z | 2024-07-22T21:39:50.217Z | [] | /posts/as-cle-bert/624836925605809 | 1,401 | 0 |
284224143230669 | [
{
"type": "text",
"value": "Introducing Whisper Diarization: Multilingual speech recognition with word-level timestamps and speaker segmentation, running 100% locally in your browser thanks to 🤗 Transformers.js!",
"raw": "Introducing Whisper Diarization: Multilingual speech recognition with word-level timestamps and speaker segmentation, running 100% locally in your browser thanks to 🤗 Transformers.js!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Tested on this iconic Letterman interview w/ Grace Hopper from 1983!",
"raw": "Tested on this iconic Letterman interview w/ Grace Hopper from 1983!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Demo: ",
"raw": "- Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Xenova/whisper-speaker-diarization",
"href": null,
"resource": {
"type": "space",
"id": "Xenova/whisper-speaker-diarization",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Xenova/whisper-speaker-diarization",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Source code: ",
"raw": "- Source code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Xenova/whisper-speaker-diarization/tree/main/whisper-speaker-diarization",
"href": null,
"resource": {
"type": "space",
"id": "Xenova/whisper-speaker-diarization",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/Xenova/whisper-speaker-diarization/tree/main/whisper-speaker-diarization",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Introducing Whisper Diarization: Multilingual speech recognition with word-level timestamps and speaker segmentation, running 100% locally in your browser thanks to 🤗 Transformers.js!
Tested on this iconic Letterman interview w/ Grace Hopper from 1983!
- Demo: https://huggingface.co/spaces/Xenova/whisper-speaker-diarization
- Source code: https://huggingface.co/spaces/Xenova/whisper-speaker-diarization/tree/main/whisper-speaker-diarization | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61b253b7ac5ecaae3d1efe0c/hwiQ0uvz3t-L5a-NtBIO6.png",
"fullname": "Joshua",
"name": "Xenova",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 3792,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/61b253b7ac5ecaae3d1efe0c/cLCFwAvCkTp-hM6eY1B4q.mp4"
}
] | [] | [
{
"reaction": "👍",
"users": [
"freinold",
"ZeroWw",
"John6666",
"DmitryRyumin",
"okamirvs",
"Nymbo",
"ssboost",
"maruyasa",
"Deddy",
"GPT007",
"sudanenator",
"Rsln",
"dave3991",
"bmorphism",
"toshvelaga",
"osanseviero",
"d8rt8v",
"devstockgirl"
],
"count": 18
},
{
"reaction": "🔥",
"users": [
"ssboost",
"prithivMLmods",
"Sylvestre",
"Deddy",
"nicolay-r",
"Gatozu35",
"AARon99",
"toshvelaga",
"osanseviero"
],
"count": 9
},
{
"reaction": "❤️",
"users": [
"julien-rodriguez",
"DataSoul",
"BoscoTheDog",
"clem",
"toshvelaga",
"osanseviero"
],
"count": 6
}
] | 2024-07-22T20:30:29.000Z | 2024-07-23T20:05:06.948Z | [
{
"avatarUrl": "/avatars/afbc48df2e8c47c35be48168113d83c0.svg",
"fullname": "s",
"name": "Tom-Neverwinter",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
}
] | /posts/Xenova/284224143230669 | 7,890 | 1 |
689972259553494 | [
{
"type": "text",
"value": "\"By the end of this blog post, you will have ",
"raw": "\"By the end of this blog post, you will have ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- learnt all the new goodies accompanying the latest macOS release ",
"raw": "- learnt all the new goodies accompanying the latest macOS release ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- AND successfully run a 7B parameter model using less than 4GB of memory on your Mac.\"",
"raw": "- AND successfully run a 7B parameter model using less than 4GB of memory on your Mac.\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Game-changer for local AI? Can't wait to try this! ",
"raw": "Game-changer for local AI? Can't wait to try this! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Brillant work by ",
"raw": "Brillant work by ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@pcuenq",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "pcuenq",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@osanseviero",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "osanseviero",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@reach-vb",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "reach-vb",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@FL33TW00D-HF",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "FL33TW00D-HF",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check it out: ",
"raw": "Check it out: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/mistral-coreml",
"href": "https://huggingface.co/blog/mistral-coreml",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " #apple ",
"raw": " #apple ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | "By the end of this blog post, you will have
- learnt all the new goodies accompanying the latest macOS release
- AND successfully run a 7B parameter model using less than 4GB of memory on your Mac."
Game-changer for local AI? Can't wait to try this!
Brillant work by @pcuenq @osanseviero @reach-vb @FL33TW00D-HF
Check it out: https://huggingface.co/blog/mistral-coreml #apple | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/NwjC-VUQBapnb50qkopv4.mp4"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6597e9f42235d4056bc6980a/6N_Eira5Rj5e8ZdgekKPQ.jpeg",
"fullname": "Christopher Fleetwood",
"name": "FL33TW00D-HF",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 54
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6032802e1f993496bc14d9e3/w6hr-DEQot4VVkoyRIBiy.png",
"fullname": "Omar Sanseviero",
"name": "osanseviero",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2868
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1617264212503-603d25b75f9d390ab190b777.jpeg",
"fullname": "Pedro Cuenca",
"name": "pcuenq",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 444
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg",
"fullname": "Vaibhav Srivastav",
"name": "reach-vb",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 460
}
] | [
{
"reaction": "🔥",
"users": [
"enzolib",
"FL33TW00D-HF",
"szymonrucinski"
],
"count": 3
}
] | 2024-07-22T18:09:25.000Z | 2024-07-22T21:03:17.173Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
}
] | /posts/fdaudens/689972259553494 | 656 | 1 |
421245113676574 | [
{
"type": "text",
"value": "🎉 Ghost 8B Beta Released: Game-Changing Language Model",
"raw": "🎉 Ghost 8B Beta Released: Game-Changing Language Model",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "--",
"raw": "--",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Ghost 8B Beta is a groundbreaking language model developed with a clear vision: to deliver exceptional multilingual support, superior knowledge capabilities, and all while remaining cost-effective. This model comes in two context length variations, 8k and 128k, ensuring flexibility for various tasks. Moreover, it boasts built-in multilingual functionality, making it a powerful tool for global communication and understanding.",
"raw": "Ghost 8B Beta is a groundbreaking language model developed with a clear vision: to deliver exceptional multilingual support, superior knowledge capabilities, and all while remaining cost-effective. This model comes in two context length variations, 8k and 128k, ensuring flexibility for various tasks. Moreover, it boasts built-in multilingual functionality, making it a powerful tool for global communication and understanding.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "--",
"raw": "--",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* See detailed article: ",
"raw": "* See detailed article: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/lamhieu/ghost-8b-beta-released-game-changing-language-mode",
"href": "https://huggingface.co/blog/lamhieu/ghost-8b-beta-released-game-changing-language-mode",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Model card: ",
"raw": "* Model card: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/ghost-x/ghost-8b-beta",
"href": null,
"resource": {
"type": "model",
"id": "ghost-x/ghost-8b-beta",
"discussionNum": null
},
"url": "https://huggingface.co/ghost-x/ghost-8b-beta",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "* Official website: ",
"raw": "* Official website: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://ghost-x.org/docs/models/ghost-8b-beta",
"href": "https://ghost-x.org/docs/models/ghost-8b-beta",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🎉 Ghost 8B Beta Released: Game-Changing Language Model
--
Ghost 8B Beta is a groundbreaking language model developed with a clear vision: to deliver exceptional multilingual support, superior knowledge capabilities, and all while remaining cost-effective. This model comes in two context length variations, 8k and 128k, ensuring flexibility for various tasks. Moreover, it boasts built-in multilingual functionality, making it a powerful tool for global communication and understanding.
--
* See detailed article: https://huggingface.co/blog/lamhieu/ghost-8b-beta-released-game-changing-language-mode
* Model card: https://huggingface.co/ghost-x/ghost-8b-beta
* Official website: https://ghost-x.org/docs/models/ghost-8b-beta | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/600ae38cc92b79f54efd4556/cSqRIslYl5L3I4WK3a31f.png",
"fullname": "Hieu Lam",
"name": "lamhieu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 74,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"ZeroWw",
"danielus",
"nicolay-r",
"ecyht2",
"John6666",
"MDalprato",
"louisbrulenaudet"
],
"count": 7
},
{
"reaction": "🤯",
"users": [
"stefan-it"
],
"count": 1
}
] | 2024-07-22T17:44:14.000Z | 2024-07-22T17:44:14.126Z | [] | /posts/lamhieu/421245113676574 | 2,105 | 0 |
797420456175789 | [
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/Azure/azureml-assets/pull/3180/files",
"href": "https://github.com/Azure/azureml-assets/pull/3180/files",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "LLAMA-3.1 benches",
"raw": "LLAMA-3.1 benches",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | ERROR: type should be string, got "\nhttps://github.com/Azure/azureml-assets/pull/3180/files\nLLAMA-3.1 benches" | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/4VOzArmrRaX_DUTxGmm59.jpeg",
"fullname": "Charles McSneed",
"name": "ChuckMcSneed",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 57,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65644e982bdaccfcd536aff1/eWfOOX5Ljs8NgWwpEmdOp.png"
}
] | [] | [
{
"reaction": "😔",
"users": [
"ZeroWw"
],
"count": 1
}
] | 2024-07-22T17:08:09.000Z | 2024-07-26T10:49:35.773Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/660432d2d2e59abb3fd40b8c/TrvNnR8wHDh9lPHm81JfQ.png",
"fullname": "David Meriwether",
"name": "BigHuggyD",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 22,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65995c45539c808e84c38bf1/k0y3ULloWQEMvosQwHgrE.png",
"fullname": "Juk Armstrong",
"name": "jukofyork",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 60,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/4VOzArmrRaX_DUTxGmm59.jpeg",
"fullname": "Charles McSneed",
"name": "ChuckMcSneed",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 57,
"isFollowing": false
}
] | /posts/ChuckMcSneed/797420456175789 | 606 | 26 |
252064064262436 | [
{
"type": "text",
"value": "Hey everyone 🤗!",
"raw": "Hey everyone 🤗!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out this cool little reproduction of the Clarity Upscaler (",
"raw": "Check out this cool little reproduction of the Clarity Upscaler (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/philz1337x/clarity-upscaler",
"href": "https://github.com/philz1337x/clarity-upscaler",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ") using refiners (",
"raw": ") using refiners (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/finegrain-ai/refiners",
"href": "https://github.com/finegrain-ai/refiners",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "): ",
"raw": "): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/spaces/finegrain/enhancer",
"href": "https://huggingface.co/spaces/finegrain/enhancer",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hey everyone 🤗!
Check out this cool little reproduction of the Clarity Upscaler (https://github.com/philz1337x/clarity-upscaler) using refiners (https://github.com/finegrain-ai/refiners): https://huggingface.co/spaces/finegrain/enhancer | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669043420538-6364f1784f773b7e4cede70c.jpeg",
"fullname": "Laureηt Fainsin",
"name": "1aurent",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 80,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"limiteinductive",
"deltheil",
"jaebumskiyomi",
"piercus",
"1aurent",
"clem",
"John6666",
"Blane187",
"osanseviero"
],
"count": 9
}
] | 2024-07-22T15:33:56.000Z | 2024-07-24T08:41:25.935Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669043420538-6364f1784f773b7e4cede70c.jpeg",
"fullname": "Laureηt Fainsin",
"name": "1aurent",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 80,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1763,
"isFollowing": false
}
] | /posts/1aurent/252064064262436 | 2,666 | 3 |
312817668697546 | [
{
"type": "text",
"value": "Questions about data, synthetic data, human feedback and data quality?",
"raw": "Questions about data, synthetic data, human feedback and data quality?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Argilla has moved its community from Slack to the Hugging Face Discord server! ",
"raw": "Argilla has moved its community from Slack to the Hugging Face Discord server! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "When part of the Hugging Face Discord, you can select “Channels & roles” and select “Argilla” along with any of the other groups that are interesting to you. “Argilla” will cover anything about argilla and distilabel, and it will give you access to 1) #argilla-distilabel-general, for all general discussions and news, and 2) #argilla-distilabel-help, for any usage-focused questions.",
"raw": "When part of the Hugging Face Discord, you can select “Channels & roles” and select “Argilla” along with any of the other groups that are interesting to you. “Argilla” will cover anything about argilla and distilabel, and it will give you access to 1) #argilla-distilabel-general, for all general discussions and news, and 2) #argilla-distilabel-help, for any usage-focused questions.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Questions about data, synthetic data, human feedback and data quality?
Argilla has moved its community from Slack to the Hugging Face Discord server!
When part of the Hugging Face Discord, you can select “Channels & roles” and select “Argilla” along with any of the other groups that are interesting to you. “Argilla” will cover anything about argilla and distilabel, and it will give you access to 1) #argilla-distilabel-general, for all general discussions and news, and 2) #argilla-distilabel-help, for any usage-focused questions.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 167,
"isFollowing": false
} | [] | [] | [] | 2024-07-22T07:52:33.000Z | 2024-07-22T07:54:33.263Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1677141720071-634ff41ff32062e9eb7b06a3.jpeg",
"fullname": "David Berenstein",
"name": "davidberenstein1957",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 167,
"isFollowing": false
}
] | /posts/davidberenstein1957/312817668697546 | 672 | 1 |
765711664963513 | [
{
"type": "text",
"value": "🚀 Good folks at ",
"raw": "🚀 Good folks at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@nvidia",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "nvidia",
"label": null,
"lang": null
},
{
"type": "text",
"value": " just dropped: \"ChatQA 2: Bridging the Gap to Proprietary LLMs in Long Context and RAG Capabilities\" 🧠💡",
"raw": " just dropped: \"ChatQA 2: Bridging the Gap to Proprietary LLMs in Long Context and RAG Capabilities\" 🧠💡",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In the past few months, the open LLM community has made significant progress in releasing open models (Llama-3-70B-Instruct (",
"raw": "In the past few months, the open LLM community has made significant progress in releasing open models (Llama-3-70B-Instruct (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Meta",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Meta",
"label": null,
"lang": null
},
{
"type": "text",
"value": " -AI) 🦙, QWen2-72BInstruct (",
"raw": " -AI) 🦙, QWen2-72BInstruct (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@AlibabaGroup",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "AlibabaGroup",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ) 🌐, Nemotron-4-340B-Instruct (",
"raw": " ) 🌐, Nemotron-4-340B-Instruct (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@nvidia",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "nvidia",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ) ⚙️, and Mixtral-8x22BInstruct-v0.1 (",
"raw": " ) ⚙️, and Mixtral-8x22BInstruct-v0.1 (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@MistralAI",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "MistralAI",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ) 🌪️) that are at par with proprietary models! 📈",
"raw": " ) 🌪️) that are at par with proprietary models! 📈",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But top models like GPT-4 are still outperforming them in certain domains! 🔝💪",
"raw": "But top models like GPT-4 are still outperforming them in certain domains! 🔝💪",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This led us to having domain-focused open-LLMs (DeepSeek-Coder-V2 for coding and math 👨💻➕, ChatQA 1.5 for conversational QA and retrieval-augmented generation (RAG) 💬🔍, and InternVL 1.5 for vision-language tasks 🖼️🗣️)",
"raw": "This led us to having domain-focused open-LLMs (DeepSeek-Coder-V2 for coding and math 👨💻➕, ChatQA 1.5 for conversational QA and retrieval-augmented generation (RAG) 💬🔍, and InternVL 1.5 for vision-language tasks 🖼️🗣️)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The challenge that ChatQA 2 focuses on is of context length and RAG! 📏🔗",
"raw": "The challenge that ChatQA 2 focuses on is of context length and RAG! 📏🔗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "These are the two capabilities essential for LLMs to process large volumes of information that cannot fit into a single prompt and are complementary to each other, depending on the downstream tasks and computational budgets. 🧩📊",
"raw": "These are the two capabilities essential for LLMs to process large volumes of information that cannot fit into a single prompt and are complementary to each other, depending on the downstream tasks and computational budgets. 🧩📊",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The solution is a detailed continued training recipe to extend the context window of Llama3-70B-base from 8K to 128K tokens, along with a three-stage instruction tuning process to enhance the model's instruction-following, RAG performance, and long-context understanding capabilities. 🔄🔧",
"raw": "The solution is a detailed continued training recipe to extend the context window of Llama3-70B-base from 8K to 128K tokens, along with a three-stage instruction tuning process to enhance the model's instruction-following, RAG performance, and long-context understanding capabilities. 🔄🔧",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📄 Paper: ",
"raw": "📄 Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2407.14482",
"href": null,
"resource": {
"type": "paper",
"id": "2407.14482",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2407.14482",
"code": null,
"user": null,
"label": "ChatQA 2: Bridging the Gap to Proprietary LLMs in Long Context and RAG\n Capabilities (2407.14482)",
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The interesting thing to notice from benchmarks was, how good QWen 2 is out of the box! 👏✨",
"raw": "The interesting thing to notice from benchmarks was, how good QWen 2 is out of the box! 👏✨",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀 Good folks at @nvidia just dropped: "ChatQA 2: Bridging the Gap to Proprietary LLMs in Long Context and RAG Capabilities" 🧠💡
In the past few months, the open LLM community has made significant progress in releasing open models (Llama-3-70B-Instruct (@Meta -AI) 🦙, QWen2-72BInstruct (@AlibabaGroup ) 🌐, Nemotron-4-340B-Instruct (@nvidia ) ⚙️, and Mixtral-8x22BInstruct-v0.1 (@MistralAI ) 🌪️) that are at par with proprietary models! 📈
But top models like GPT-4 are still outperforming them in certain domains! 🔝💪
This led us to having domain-focused open-LLMs (DeepSeek-Coder-V2 for coding and math 👨💻➕, ChatQA 1.5 for conversational QA and retrieval-augmented generation (RAG) 💬🔍, and InternVL 1.5 for vision-language tasks 🖼️🗣️)
The challenge that ChatQA 2 focuses on is of context length and RAG! 📏🔗
These are the two capabilities essential for LLMs to process large volumes of information that cannot fit into a single prompt and are complementary to each other, depending on the downstream tasks and computational budgets. 🧩📊
The solution is a detailed continued training recipe to extend the context window of Llama3-70B-base from 8K to 128K tokens, along with a three-stage instruction tuning process to enhance the model's instruction-following, RAG performance, and long-context understanding capabilities. 🔄🔧
📄 Paper: https://huggingface.co/papers/2407.14482
The interesting thing to notice from benchmarks was, how good QWen 2 is out of the box! 👏✨ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 219,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/L-KtuQhHRZnj-Cr-Rrsp-.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61e8c67cee1e1440121f0240/9sb__WsO5mwmdHHa6xKNc.jpeg",
"fullname": "Meta World Peace",
"name": "Meta",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5
}
] | [
{
"reaction": "👀",
"users": [
"louisbrulenaudet"
],
"count": 1
}
] | 2024-07-22T05:08:52.000Z | 2024-07-22T05:08:52.486Z | [] | /posts/singhsidhukuldeep/765711664963513 | 702 | 0 |
281477193416730 | [
{
"type": "text",
"value": "Intelligence is all you need for roleplay.",
"raw": "Intelligence is all you need for roleplay.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Roleplay is overlooked as a special case of chain-of-thought, where context must be attended to and inferred state of the world and embodied minds must be persisted and evolved along credible narrative lines. LLMs are also being tasked to function as gamemasters. It's a challenging task which points to potential future benchmarks. The fact that the largest commercial LLMs are adept in generating text for roleplay intuitively implies that model intelligence is sufficient so long as it can generalize properly and pay attention to context without becoming confused.",
"raw": "Roleplay is overlooked as a special case of chain-of-thought, where context must be attended to and inferred state of the world and embodied minds must be persisted and evolved along credible narrative lines. LLMs are also being tasked to function as gamemasters. It's a challenging task which points to potential future benchmarks. The fact that the largest commercial LLMs are adept in generating text for roleplay intuitively implies that model intelligence is sufficient so long as it can generalize properly and pay attention to context without becoming confused.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This recent merge of mine composed using 3 academic fine-tunes, none of which were intended for roleplay, has survived the gauntlet of a Reddit post and appears to be a particularly strong 8B model when it comes to roleplay coherence.",
"raw": "This recent merge of mine composed using 3 academic fine-tunes, none of which were intended for roleplay, has survived the gauntlet of a Reddit post and appears to be a particularly strong 8B model when it comes to roleplay coherence.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/grimjim/llama-3-Nephilim-v3-8B",
"href": null,
"resource": {
"type": "model",
"id": "grimjim/llama-3-Nephilim-v3-8B",
"discussionNum": null
},
"url": "https://huggingface.co/grimjim/llama-3-Nephilim-v3-8B",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " (bf16 weights)",
"raw": " (bf16 weights)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/grimjim/llama-3-Nephilim-v3-8B-GGUF",
"href": null,
"resource": {
"type": "model",
"id": "grimjim/llama-3-Nephilim-v3-8B-GGUF",
"discussionNum": null
},
"url": "https://huggingface.co/grimjim/llama-3-Nephilim-v3-8B-GGUF",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " (select quants)",
"raw": " (select quants)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Intelligence is all you need for roleplay.
Roleplay is overlooked as a special case of chain-of-thought, where context must be attended to and inferred state of the world and embodied minds must be persisted and evolved along credible narrative lines. LLMs are also being tasked to function as gamemasters. It's a challenging task which points to potential future benchmarks. The fact that the largest commercial LLMs are adept in generating text for roleplay intuitively implies that model intelligence is sufficient so long as it can generalize properly and pay attention to context without becoming confused.
This recent merge of mine composed using 3 academic fine-tunes, none of which were intended for roleplay, has survived the gauntlet of a Reddit post and appears to be a particularly strong 8B model when it comes to roleplay coherence.
https://huggingface.co/grimjim/llama-3-Nephilim-v3-8B (bf16 weights)
https://huggingface.co/grimjim/llama-3-Nephilim-v3-8B-GGUF (select quants) | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65c992424936ab38ecf706b0/aq7vuHFPO1S93fwJk0Cuq.jpeg",
"fullname": "Jim Lai",
"name": "grimjim",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 166,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"ZeroWw",
"SerialKicked",
"Duttones",
"takeraparterer",
"Cohee",
"jed-tiotuico"
],
"count": 7
}
] | 2024-07-22T01:20:52.000Z | 2024-07-22T22:54:52.894Z | [
{
"avatarUrl": "/avatars/ab4dd498bbc0d5931f733b5a364fa765.svg",
"fullname": "Vitor Lima",
"name": "Duttones",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
}
] | /posts/grimjim/281477193416730 | 2,751 | 1 |
957104236129665 | [
{
"type": "text",
"value": "LivePortrait AI: Transform Static Photos into Talking Videos. Now supporting Video-to-Video conversion and Superior Expression Transfer at Remarkable Speed",
"raw": "LivePortrait AI: Transform Static Photos into Talking Videos. Now supporting Video-to-Video conversion and Superior Expression Transfer at Remarkable Speed",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A new tutorial is anticipated to showcase the latest changes and features in V3, including Video-to-Video capabilities and additional enhancements.",
"raw": "A new tutorial is anticipated to showcase the latest changes and features in V3, including Video-to-Video capabilities and additional enhancements.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This post provides information for both Windows (local) and Cloud installations (Massed Compute, RunPod, and free Kaggle Account).",
"raw": "This post provides information for both Windows (local) and Cloud installations (Massed Compute, RunPod, and free Kaggle Account).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Windows Local Installation Tutorial ️⤵️",
"raw": "🔗 Windows Local Installation Tutorial ️⤵️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "▶️ ",
"raw": "▶️ ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/FPtpNrmuwXk",
"href": "https://youtu.be/FPtpNrmuwXk",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔗 Cloud (no-GPU) Installations Tutorial for Massed Compute, RunPod and free Kaggle Account ️⤵️",
"raw": "🔗 Cloud (no-GPU) Installations Tutorial for Massed Compute, RunPod and free Kaggle Account ️⤵️",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "▶️ ",
"raw": "▶️ ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/wG7oPp01COg",
"href": "https://youtu.be/wG7oPp01COg",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The V3 update introduces video-to-video functionality. If you're seeking a one-click installation method for LivePortrait, an open-source zero-shot image-to-animation application on Windows, for local use, this tutorial is essential. It introduces the cutting-edge image-to-animation open-source generator Live Portrait. Simply provide a static image and a driving video to create an impressive animation in seconds. LivePortrait is incredibly fast and adept at preserving facial expressions from the input video. The results are truly astonishing.",
"raw": "The V3 update introduces video-to-video functionality. If you're seeking a one-click installation method for LivePortrait, an open-source zero-shot image-to-animation application on Windows, for local use, this tutorial is essential. It introduces the cutting-edge image-to-animation open-source generator Live Portrait. Simply provide a static image and a driving video to create an impressive animation in seconds. LivePortrait is incredibly fast and adept at preserving facial expressions from the input video. The results are truly astonishing.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "With the V3 update adding video-to-video functionality, those interested in using LivePortrait but lacking a powerful GPU, using a Mac, or preferring cloud-based solutions will find this tutorial invaluable. It guides you through the one-click installation and usage of LivePortrait on #MassedCompute, #RunPod, and even a free #Kaggle account. After following this tutorial, you'll find running LivePortrait on cloud services as straightforward as running it locally. LivePortrait is the latest state-of-the-art static image to talking animation generator, surpassing even paid services in both speed and quality.",
"raw": "With the V3 update adding video-to-video functionality, those interested in using LivePortrait but lacking a powerful GPU, using a Mac, or preferring cloud-based solutions will find this tutorial invaluable. It guides you through the one-click installation and usage of LivePortrait on #MassedCompute, #RunPod, and even a free #Kaggle account. After following this tutorial, you'll find running LivePortrait on cloud services as straightforward as running it locally. LivePortrait is the latest state-of-the-art static image to talking animation generator, surpassing even paid services in both speed and quality.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | LivePortrait AI: Transform Static Photos into Talking Videos. Now supporting Video-to-Video conversion and Superior Expression Transfer at Remarkable Speed
A new tutorial is anticipated to showcase the latest changes and features in V3, including Video-to-Video capabilities and additional enhancements.
This post provides information for both Windows (local) and Cloud installations (Massed Compute, RunPod, and free Kaggle Account).
🔗 Windows Local Installation Tutorial ️⤵️
▶️ https://youtu.be/FPtpNrmuwXk
🔗 Cloud (no-GPU) Installations Tutorial for Massed Compute, RunPod and free Kaggle Account ️⤵️
▶️ https://youtu.be/wG7oPp01COg
The V3 update introduces video-to-video functionality. If you're seeking a one-click installation method for LivePortrait, an open-source zero-shot image-to-animation application on Windows, for local use, this tutorial is essential. It introduces the cutting-edge image-to-animation open-source generator Live Portrait. Simply provide a static image and a driving video to create an impressive animation in seconds. LivePortrait is incredibly fast and adept at preserving facial expressions from the input video. The results are truly astonishing.
With the V3 update adding video-to-video functionality, those interested in using LivePortrait but lacking a powerful GPU, using a Mac, or preferring cloud-based solutions will find this tutorial invaluable. It guides you through the one-click installation and usage of LivePortrait on #MassedCompute, #RunPod, and even a free #Kaggle account. After following this tutorial, you'll find running LivePortrait on cloud services as straightforward as running it locally. LivePortrait is the latest state-of-the-art static image to talking animation generator, surpassing even paid services in both speed and quality.
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 376,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/XgzWk_uT2DvCVFtd2L__P.mp4"
}
] | [] | [
{
"reaction": "👍",
"users": [
"MonsterMMORPG",
"Duartebrizza",
"John6666",
"Ramikan-BR",
"attyru",
"Dihelson",
"Blane187",
"Rsln",
"osanseviero"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"MonsterMMORPG",
"Ramikan-BR",
"bakudas",
"Dihelson",
"abdeljalilELmajjodi",
"Rsln",
"0xjorgev"
],
"count": 7
},
{
"reaction": "👀",
"users": [
"MonsterMMORPG",
"Ramikan-BR",
"yutianCCNU",
"bakudas",
"NHLOCAL"
],
"count": 5
},
{
"reaction": "🤯",
"users": [
"MonsterMMORPG",
"Ramikan-BR",
"bakudas"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
},
{
"reaction": "❤️",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
},
{
"reaction": "😎",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
},
{
"reaction": "🤝",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
},
{
"reaction": "🤗",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
},
{
"reaction": "➕",
"users": [
"MonsterMMORPG",
"Ramikan-BR"
],
"count": 2
}
] | 2024-07-21T22:05:35.000Z | 2024-07-22T10:08:03.626Z | [
{
"avatarUrl": "/avatars/d92c459b18a4aa5642e5c4bd3b8e3fe4.svg",
"fullname": "Mendonca",
"name": "Dihelson",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 376,
"isFollowing": false
}
] | /posts/MonsterMMORPG/957104236129665 | 5,210 | 2 |
247059445469957 | [
{
"type": "text",
"value": "Hi HF Community!🤗",
"raw": "Hi HF Community!🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "As you may know, small language models like the SmolLM series have been on the rise recently: although it may not be completely fair to compare them with larger models, my thought was that we could build a space where these SLMs could compete against each other in a chat arena, and here is what came out: ",
"raw": "As you may know, small language models like the SmolLM series have been on the rise recently: although it may not be completely fair to compare them with larger models, my thought was that we could build a space where these SLMs could compete against each other in a chat arena, and here is what came out: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/as-cle-bert/smolLM-arena",
"href": null,
"resource": {
"type": "space",
"id": "as-cle-bert/smolLM-arena",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/as-cle-bert/smolLM-arena",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 🚀",
"raw": " 🚀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Even though there might be some little issues and hiccups due to GPU resources allocation, this space offers you the possibility to compare and play around with several Small Language Models, coming also with a leaderboard page (make sure to refresh it for the latest updates!)👑",
"raw": "Even though there might be some little issues and hiccups due to GPU resources allocation, this space offers you the possibility to compare and play around with several Small Language Models, coming also with a leaderboard page (make sure to refresh it for the latest updates!)👑",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Have fun!🍕",
"raw": "Have fun!🍕",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Hi HF Community!🤗
As you may know, small language models like the SmolLM series have been on the rise recently: although it may not be completely fair to compare them with larger models, my thought was that we could build a space where these SLMs could compete against each other in a chat arena, and here is what came out: https://huggingface.co/spaces/as-cle-bert/smolLM-arena 🚀
Even though there might be some little issues and hiccups due to GPU resources allocation, this space offers you the possibility to compare and play around with several Small Language Models, coming also with a leaderboard page (make sure to refresh it for the latest updates!)👑
Have fun!🍕 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg",
"fullname": "Astra Clelia Bertelli",
"name": "as-cle-bert",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 650,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"clfegg",
"osanseviero"
],
"count": 2
}
] | 2024-07-21T19:19:15.000Z | 2024-07-22T13:18:10.736Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/644cb09a22d211df644a0a6c/v0EHypMU4X3Oxxf3cao_O.png",
"fullname": "Júlio César",
"name": "Ramikan-BR",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
},
{
"avatarUrl": "/avatars/d92c459b18a4aa5642e5c4bd3b8e3fe4.svg",
"fullname": "Mendonca",
"name": "Dihelson",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65e330e7edc2f7306e252448/ucpk9c8x0UafGM4mXTrRy.jpeg",
"fullname": "Astra Clelia Bertelli",
"name": "as-cle-bert",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 650,
"isFollowing": false
}
] | /posts/as-cle-bert/247059445469957 | 562 | 5 |
851413061927294 | [
{
"type": "text",
"value": "Introducing OpenCHAT mini: a lightweight, fast, and unlimited version of OpenGPT 4o.",
"raw": "Introducing OpenCHAT mini: a lightweight, fast, and unlimited version of OpenGPT 4o.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/KingNish/OpenCHAT-mini2",
"href": null,
"resource": {
"type": "space",
"id": "KingNish/OpenCHAT-mini2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/KingNish/OpenCHAT-mini2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It has unlimited web search, vision and image generation.",
"raw": "It has unlimited web search, vision and image generation.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Please take a look and share your review. Thank you! 🤗",
"raw": "Please take a look and share your review. Thank you! 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Introducing OpenCHAT mini: a lightweight, fast, and unlimited version of OpenGPT 4o.
https://huggingface.co/spaces/KingNish/OpenCHAT-mini2
It has unlimited web search, vision and image generation.
Please take a look and share your review. Thank you! 🤗 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg",
"fullname": "Nishith Jain",
"name": "KingNish",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1079,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"JackLiuAngel",
"abbotk",
"Blane187",
"Trillionare",
"SvCy",
"louisbrulenaudet",
"AdamyaG",
"noelpil",
"peterciank"
],
"count": 10
},
{
"reaction": "😎",
"users": [
"Wuayker"
],
"count": 1
}
] | 2024-07-21T16:46:28.000Z | 2024-08-19T07:07:10.277Z | [
{
"avatarUrl": "/avatars/e64b02b16f82f7062248393ab51761d0.svg",
"fullname": "JackLiu",
"name": "JackLiuAngel",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
},
{
"avatarUrl": "/avatars/de5465ed2cc75f84f772bc2e595f5740.svg",
"fullname": "Levi Zoesch",
"name": "xbelevi",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6612aedf09f16e7347dfa7e1/bPYjBXCedY_1fSIPjoBTY.jpeg",
"fullname": "Nishith Jain",
"name": "KingNish",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1079,
"isFollowing": false
},
{
"avatarUrl": "/avatars/d3290da7c6de68101e81208688f8f5a1.svg",
"fullname": "Timofey",
"name": "nbv123",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "/avatars/937bb0ad65d807c6ea24499dc4544fa4.svg",
"fullname": "Pashangh Irani",
"name": "Orion34523",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/KingNish/851413061927294 | 5,870 | 7 |
677543233180235 | [
{
"type": "text",
"value": "Reading ",
"raw": "Reading ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2212.11279",
"href": null,
"resource": {
"type": "paper",
"id": "2212.11279",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2212.11279",
"code": null,
"user": null,
"label": "Annotated History of Modern AI and Deep Learning (2212.11279)",
"lang": null
},
{
"type": "text",
"value": " by Jürgen Schmidhuber.",
"raw": " by Jürgen Schmidhuber.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "From the abstract: \"A modern history of AI will emphasize breakthroughs outside of the focus of traditional AI text books, in particular, mathematical foundations of today's NNs such as the chain rule (1676), the first NNs (linear regression, circa 1800), and the first working deep learners (1965-).\"",
"raw": "From the abstract: \"A modern history of AI will emphasize breakthroughs outside of the focus of traditional AI text books, in particular, mathematical foundations of today's NNs such as the chain rule (1676), the first NNs (linear regression, circa 1800), and the first working deep learners (1965-).\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Reading https://huggingface.co/papers/2212.11279 by Jürgen Schmidhuber.
From the abstract: "A modern history of AI will emphasize breakthroughs outside of the focus of traditional AI text books, in particular, mathematical foundations of today's NNs such as the chain rule (1676), the first NNs (linear regression, circa 1800), and the first working deep learners (1965-)." | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64b695dcd3df8086e5ed7c89/06Toh65jDEz3WJbIM6ZmZ.jpeg",
"fullname": "Adam Fields",
"name": "adamelliotfields",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 10,
"isFollowing": false
} | [] | [] | [] | 2024-07-21T13:41:54.000Z | 2024-07-21T13:42:25.801Z | [] | /posts/adamelliotfields/677543233180235 | 588 | 0 |
332303861025570 | [
{
"type": "text",
"value": "You can now find the OBIS - Ocean Biodiversity Information System, on Hugging Face with 128M rows, via the Datasets package stream 🤗",
"raw": "You can now find the OBIS - Ocean Biodiversity Information System, on Hugging Face with 128M rows, via the Datasets package stream 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The datasets are integrated, allowing seamless search and mapping by species name, higher taxonomic level, geographic area, depth, time, and environmental parameters. OBIS originates from the Census of Marine Life (2000-2010) and was adopted as a project under IOC-UNESCO’s International Oceanographic Data and Information (IODE) programme in 2009.",
"raw": "The datasets are integrated, allowing seamless search and mapping by species name, higher taxonomic level, geographic area, depth, time, and environmental parameters. OBIS originates from the Census of Marine Life (2000-2010) and was adopted as a project under IOC-UNESCO’s International Oceanographic Data and Information (IODE) programme in 2009.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Collectively, they have provided over 45 million observations of nearly 120,000 marine species, ranging from bacteria to whales, from the surface to 10,900 meters depth, and from the tropics to the poles.",
"raw": "Collectively, they have provided over 45 million observations of nearly 120,000 marine species, ranging from bacteria to whales, from the surface to 10,900 meters depth, and from the tropics to the poles.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Link to the dataset: ",
"raw": "Link to the dataset: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/louisbrulenaudet/obis",
"href": null,
"resource": {
"type": "dataset",
"id": "louisbrulenaudet/obis",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/louisbrulenaudet/obis",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | You can now find the OBIS - Ocean Biodiversity Information System, on Hugging Face with 128M rows, via the Datasets package stream 🤗
The datasets are integrated, allowing seamless search and mapping by species name, higher taxonomic level, geographic area, depth, time, and environmental parameters. OBIS originates from the Census of Marine Life (2000-2010) and was adopted as a project under IOC-UNESCO’s International Oceanographic Data and Information (IODE) programme in 2009.
Collectively, they have provided over 45 million observations of nearly 120,000 marine species, ranging from bacteria to whales, from the surface to 10,900 meters depth, and from the tropics to the poles.
Link to the dataset: https://huggingface.co/datasets/louisbrulenaudet/obis | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6459fa0f5b3111fbe83286e1/UhCa7JNbtTjC6dgOjZtH0.jpeg",
"fullname": "Louis Brulé Naudet",
"name": "louisbrulenaudet",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 174,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6459fa0f5b3111fbe83286e1/JhUbEswcpw6ROpeDFkal6.jpeg"
}
] | [] | [] | 2024-07-21T07:55:07.000Z | 2024-07-21T07:55:07.031Z | [] | /posts/louisbrulenaudet/332303861025570 | 869 | 0 |
876153321993040 | [
{
"type": "text",
"value": "New Trends in LLM: Overview with Focus on xLLM",
"raw": "New Trends in LLM: Overview with Focus on xLLM",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Read full article and download PowerPoint presentation at ",
"raw": "Read full article and download PowerPoint presentation at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://mltblog.com/3KqlNO7",
"href": "https://mltblog.com/3KqlNO7",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you ever wondered how xLLM is different from other LLM and RAG architectures, what are the foundational changes that make it appealing to fortune 100 companies, and what are the innovations being copied by competitors, read on. In this article, I share the latest trends and provide a high-level summary of xLLM, describing the ground-breaking technologies that make it unique, faster, and better for professional users and experts. In particular, I share my PowerPoint presentation on the topic.",
"raw": "If you ever wondered how xLLM is different from other LLM and RAG architectures, what are the foundational changes that make it appealing to fortune 100 companies, and what are the innovations being copied by competitors, read on. In this article, I share the latest trends and provide a high-level summary of xLLM, describing the ground-breaking technologies that make it unique, faster, and better for professional users and experts. In particular, I share my PowerPoint presentation on the topic.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Search is becoming hot again, this time powered by RAG and LLMs rather than PageRank. New LLMs may not use transformers, and energy-efficient implementations are gaining popularity, with an attempt to lower GPU usage, and thus costs. Yet all but xLLM still rely on Blackbox neural networks.",
"raw": "Search is becoming hot again, this time powered by RAG and LLMs rather than PageRank. New LLMs may not use transformers, and energy-efficient implementations are gaining popularity, with an attempt to lower GPU usage, and thus costs. Yet all but xLLM still rely on Blackbox neural networks.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Great evaluation metrics remain elusive and will remain so probably forever: in the end, LLMs, just like clustering, are part of unsupervised learning. Two users looking at a non-trivial dataset will never agree on what the “true” underlying cluster structure is. Because “true” is meaningless in this context. The same applies to LLMs. With some exceptions: when used for predictive analytics, that is, supervised learning, it is possible to tell which LLM is best in absolute terms (to some extent; it also depends on the dataset).",
"raw": "Great evaluation metrics remain elusive and will remain so probably forever: in the end, LLMs, just like clustering, are part of unsupervised learning. Two users looking at a non-trivial dataset will never agree on what the “true” underlying cluster structure is. Because “true” is meaningless in this context. The same applies to LLMs. With some exceptions: when used for predictive analytics, that is, supervised learning, it is possible to tell which LLM is best in absolute terms (to some extent; it also depends on the dataset).",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | New Trends in LLM: Overview with Focus on xLLM
Read full article and download PowerPoint presentation at https://mltblog.com/3KqlNO7
If you ever wondered how xLLM is different from other LLM and RAG architectures, what are the foundational changes that make it appealing to fortune 100 companies, and what are the innovations being copied by competitors, read on. In this article, I share the latest trends and provide a high-level summary of xLLM, describing the ground-breaking technologies that make it unique, faster, and better for professional users and experts. In particular, I share my PowerPoint presentation on the topic.
Search is becoming hot again, this time powered by RAG and LLMs rather than PageRank. New LLMs may not use transformers, and energy-efficient implementations are gaining popularity, with an attempt to lower GPU usage, and thus costs. Yet all but xLLM still rely on Blackbox neural networks.
Great evaluation metrics remain elusive and will remain so probably forever: in the end, LLMs, just like clustering, are part of unsupervised learning. Two users looking at a non-trivial dataset will never agree on what the “true” underlying cluster structure is. Because “true” is meaningless in this context. The same applies to LLMs. With some exceptions: when used for predictive analytics, that is, supervised learning, it is possible to tell which LLM is best in absolute terms (to some extent; it also depends on the dataset).
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/669c89e98f2dbc203f9e74ab/higvnXEHeo_Ig2bgTpn47.png",
"fullname": "Vincent Granville",
"name": "vincentg64",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 17,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/669c89e98f2dbc203f9e74ab/FM4OPaZa5zsL_NJYbr9pm.png"
}
] | [] | [] | 2024-07-21T04:55:17.000Z | 2024-07-21T04:56:40.618Z | [] | /posts/vincentg64/876153321993040 | 466 | 0 |
933947307384174 | [
{
"type": "text",
"value": "I can do Time Series Predictions with Swarm Algorithms! When all you know how to use is a hammer, everything looks like a nail. An LLM model is a hammer. It is not a deity. It has computational and mathematical limitations. Very big ones. Swarm Algorithms do not have this same problem. They are like a screwdriver. The screwdriver is not better than the hammer, both are useful. Why are LLM models bad at things like Time Series Predictions and Function Calls? Because those are jobs better fit for a screwdriver as opposed to a hammer. ",
"raw": "I can do Time Series Predictions with Swarm Algorithms! When all you know how to use is a hammer, everything looks like a nail. An LLM model is a hammer. It is not a deity. It has computational and mathematical limitations. Very big ones. Swarm Algorithms do not have this same problem. They are like a screwdriver. The screwdriver is not better than the hammer, both are useful. Why are LLM models bad at things like Time Series Predictions and Function Calls? Because those are jobs better fit for a screwdriver as opposed to a hammer. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I can do Time Series Predictions with Swarm Algorithms! When all you know how to use is a hammer, everything looks like a nail. An LLM model is a hammer. It is not a deity. It has computational and mathematical limitations. Very big ones. Swarm Algorithms do not have this same problem. They are like a screwdriver. The screwdriver is not better than the hammer, both are useful. Why are LLM models bad at things like Time Series Predictions and Function Calls? Because those are jobs better fit for a screwdriver as opposed to a hammer. | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64274b69ba6cef0a6ebb0fd6/IBnV8-RjcZwM4oR3Bk6Jm.png"
}
] | [] | [] | 2024-07-21T00:33:23.000Z | 2024-07-21T02:40:31.690Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6316fb937b0ee0136e5f1220/poHBoJ7QAF_s2CCaosdvQ.jpeg",
"fullname": "Firstname Lastname",
"name": "takeraparterer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 29,
"isFollowing": false
}
] | /posts/TuringsSolutions/933947307384174 | 515 | 1 |
295019542040831 | [
{
"type": "text",
"value": "Remember when you had a few hundred rows of data that could easily be opened in Excel. 📊",
"raw": "Remember when you had a few hundred rows of data that could easily be opened in Excel. 📊",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Well, we are far from that with billion-parameter LLMs trained on trillions of tokens. 🌐",
"raw": "Well, we are far from that with billion-parameter LLMs trained on trillions of tokens. 🌐",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@Microsoft",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "Microsoft",
"label": null,
"lang": null
},
{
"type": "text",
"value": " wants to bridge that using \"SpreadsheetLLM\": Encoding Spreadsheets for Large Language Models. 🤖📈",
"raw": " wants to bridge that using \"SpreadsheetLLM\": Encoding Spreadsheets for Large Language Models. 🤖📈",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "While it sounds simple, Spreadsheets, with their extensive two-dimensional grids, various layouts, and diverse formatting options, present notable challenges for large language models (LLMs). 🚧",
"raw": "While it sounds simple, Spreadsheets, with their extensive two-dimensional grids, various layouts, and diverse formatting options, present notable challenges for large language models (LLMs). 🚧",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "They initially propose a vanilla serialization approach that incorporates cell addresses, values, and formats. However, this approach is limited by LLMs' token constraints, making it impractical for most applications. ⛔",
"raw": "They initially propose a vanilla serialization approach that incorporates cell addresses, values, and formats. However, this approach is limited by LLMs' token constraints, making it impractical for most applications. ⛔",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Solution... A SheetCompressor, an innovative encoding framework that compresses spreadsheets effectively for LLMs. 🔧",
"raw": "Solution... A SheetCompressor, an innovative encoding framework that compresses spreadsheets effectively for LLMs. 🔧",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It comprises three modules: ",
"raw": "It comprises three modules: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "1️⃣ Structural-anchor-based compression",
"raw": "1️⃣ Structural-anchor-based compression",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "2️⃣ Inverse index translation",
"raw": "2️⃣ Inverse index translation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3️⃣ Data-format-aware aggregation",
"raw": "3️⃣ Data-format-aware aggregation",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "It significantly improves performance in spreadsheet table detection task, outperforming the vanilla approach by 25.6% in GPT4's in-context learning setting. 🏆",
"raw": "It significantly improves performance in spreadsheet table detection task, outperforming the vanilla approach by 25.6% in GPT4's in-context learning setting. 🏆",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Sounds exciting, sadly no code, models OR datasets are released. 🙁",
"raw": "Sounds exciting, sadly no code, models OR datasets are released. 🙁",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Moreover, there is a lot of research in encoding 2D position embeddings and this work has not been benchmarked against that! 🧐",
"raw": "Moreover, there is a lot of research in encoding 2D position embeddings and this work has not been benchmarked against that! 🧐",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Paper: ",
"raw": "Paper: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2407.09025",
"href": null,
"resource": {
"type": "paper",
"id": "2407.09025",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2407.09025",
"code": null,
"user": null,
"label": "SpreadsheetLLM: Encoding Spreadsheets for Large Language Models (2407.09025)",
"lang": null
}
] | Remember when you had a few hundred rows of data that could easily be opened in Excel. 📊
Well, we are far from that with billion-parameter LLMs trained on trillions of tokens. 🌐
@Microsoft wants to bridge that using "SpreadsheetLLM": Encoding Spreadsheets for Large Language Models. 🤖📈
While it sounds simple, Spreadsheets, with their extensive two-dimensional grids, various layouts, and diverse formatting options, present notable challenges for large language models (LLMs). 🚧
They initially propose a vanilla serialization approach that incorporates cell addresses, values, and formats. However, this approach is limited by LLMs' token constraints, making it impractical for most applications. ⛔
Solution... A SheetCompressor, an innovative encoding framework that compresses spreadsheets effectively for LLMs. 🔧
It comprises three modules:
1️⃣ Structural-anchor-based compression
2️⃣ Inverse index translation
3️⃣ Data-format-aware aggregation
It significantly improves performance in spreadsheet table detection task, outperforming the vanilla approach by 25.6% in GPT4's in-context learning setting. 🏆
Sounds exciting, sadly no code, models OR datasets are released. 🙁
Moreover, there is a lot of research in encoding 2D position embeddings and this work has not been benchmarked against that! 🧐
Paper: https://huggingface.co/papers/2407.09025 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/662bf5bfe93bb73804ef9344/WXYLnjjJ4SROkoveIi7If.png",
"fullname": "Kuldeep Singh Sidhu",
"name": "singhsidhukuldeep",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 219,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/662bf5bfe93bb73804ef9344/36ySC-N5igRF3LbgJJo35.jpeg"
}
] | [] | [] | 2024-07-20T19:45:25.000Z | 2024-07-20T19:45:25.923Z | [] | /posts/singhsidhukuldeep/295019542040831 | 455 | 0 |
897353773831828 | [
{
"type": "text",
"value": "Wanted to share some brief comparison of early training of the two-stage PixArt e-diffi pipeline.",
"raw": "Wanted to share some brief comparison of early training of the two-stage PixArt e-diffi pipeline.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "On the left, we have the full stage 1 model generating all 50 steps on its own. This model is not trained at all on the final 400 timesteps of the schedule. On the right, we have the combined pipeline where stage 1 output is fed into stage 2.",
"raw": "On the left, we have the full stage 1 model generating all 50 steps on its own. This model is not trained at all on the final 400 timesteps of the schedule. On the right, we have the combined pipeline where stage 1 output is fed into stage 2.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Currently, the difference is rather minimal - but the small details are reliably improved. ",
"raw": "Currently, the difference is rather minimal - but the small details are reliably improved. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "In the watercolour example, the full generation (right side) has the texture of the watercolour paper, and the partial generation (left side) has a more flat digital art look to it.",
"raw": "In the watercolour example, the full generation (right side) has the texture of the watercolour paper, and the partial generation (left side) has a more flat digital art look to it.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "For the blacksmith robot, the sparks emitted from the operation have a more natural blend to it. The robot's clothing appears to be undergoing some interesting transformation due to the undertrained state of the weights.",
"raw": "For the blacksmith robot, the sparks emitted from the operation have a more natural blend to it. The robot's clothing appears to be undergoing some interesting transformation due to the undertrained state of the weights.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The medieval battle image has improved blades of grass, settling dust particles, and fabric in the flag.",
"raw": "The medieval battle image has improved blades of grass, settling dust particles, and fabric in the flag.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The stage 2 model being trained does not seem to resolve any global coherence issues despite having 400 steps in its schedule, but it still noticeably changes the local coherence, eg. the consistency of fabrics and metals can be improved through stage 2 fine-tuning.",
"raw": "The stage 2 model being trained does not seem to resolve any global coherence issues despite having 400 steps in its schedule, but it still noticeably changes the local coherence, eg. the consistency of fabrics and metals can be improved through stage 2 fine-tuning.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The stage 1 model is the workhorse of the output, as expected with the 600 timesteps in its schedule. Additional fine-tuning of this model will improve the overall global coherence of the outputs. I wish I could say it will not impact fine details, but a lot of that does seem to be carried forward.",
"raw": "The stage 1 model is the workhorse of the output, as expected with the 600 timesteps in its schedule. Additional fine-tuning of this model will improve the overall global coherence of the outputs. I wish I could say it will not impact fine details, but a lot of that does seem to be carried forward.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "As noted, these models are undertrained due to a lack of compute. But they are a promising look toward what an e-diffi PixArt might be capable of.",
"raw": "As noted, these models are undertrained due to a lack of compute. But they are a promising look toward what an e-diffi PixArt might be capable of.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Does anyone want to build this out fully with me?",
"raw": "Does anyone want to build this out fully with me?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Wanted to share some brief comparison of early training of the two-stage PixArt e-diffi pipeline.
On the left, we have the full stage 1 model generating all 50 steps on its own. This model is not trained at all on the final 400 timesteps of the schedule. On the right, we have the combined pipeline where stage 1 output is fed into stage 2.
Currently, the difference is rather minimal - but the small details are reliably improved.
In the watercolour example, the full generation (right side) has the texture of the watercolour paper, and the partial generation (left side) has a more flat digital art look to it.
For the blacksmith robot, the sparks emitted from the operation have a more natural blend to it. The robot's clothing appears to be undergoing some interesting transformation due to the undertrained state of the weights.
The medieval battle image has improved blades of grass, settling dust particles, and fabric in the flag.
The stage 2 model being trained does not seem to resolve any global coherence issues despite having 400 steps in its schedule, but it still noticeably changes the local coherence, eg. the consistency of fabrics and metals can be improved through stage 2 fine-tuning.
The stage 1 model is the workhorse of the output, as expected with the 600 timesteps in its schedule. Additional fine-tuning of this model will improve the overall global coherence of the outputs. I wish I could say it will not impact fine details, but a lot of that does seem to be carried forward.
As noted, these models are undertrained due to a lack of compute. But they are a promising look toward what an e-diffi PixArt might be capable of.
Does anyone want to build this out fully with me? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641caf6c043963b1c0a27256/CD7ktICDsldVJlpiND5kl.png",
"fullname": "PseudoTerminal X",
"name": "bghira",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 103,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/641caf6c043963b1c0a27256/Oxrq0_SpXhQrpdVrubHEQ.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/641caf6c043963b1c0a27256/WkIj-nTKuHTynI10jseOQ.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/641caf6c043963b1c0a27256/bYohQ1rCDrkjh2bXItJCa.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"datnt114",
"GPT007",
"Sylvestre"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"FilipeR",
"tcreamype"
],
"count": 2
},
{
"reaction": "👀",
"users": [
"afondiel",
"GPT007"
],
"count": 2
}
] | 2024-07-20T18:48:05.000Z | 2024-07-21T02:00:18.824Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641caf6c043963b1c0a27256/CD7ktICDsldVJlpiND5kl.png",
"fullname": "PseudoTerminal X",
"name": "bghira",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 103,
"isFollowing": false
}
] | /posts/bghira/897353773831828 | 4,366 | 1 |
715230803020940 | [
{
"type": "text",
"value": "Check this out on Poe",
"raw": "Check this out on Poe",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "StableDiffusionXL Prompt Generator",
"raw": "StableDiffusionXL Prompt Generator",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ": ",
"raw": ": ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://poe.com/sdpai",
"href": "https://poe.com/sdpai",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Check this out on Poe
StableDiffusionXL Prompt Generator
: https://poe.com/sdpai | {
"avatarUrl": "/avatars/d773a7dd9b706759131fc482ab71ced7.svg",
"fullname": "[email protected]",
"name": "Taf2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 8,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/64841af2295256340e4b9f88/nlhsjHEDVK3p21f_6nTVp.webp"
}
] | [] | [] | 2024-07-20T13:37:25.000Z | 2024-07-20T13:37:25.798Z | [] | /posts/Taf2023/715230803020940 | 392 | 0 |
821875507658799 | [
{
"type": "text",
"value": "We Got a Job Offer in SECourses Discord Channel Related to AI (Stable Diffusion)",
"raw": "We Got a Job Offer in SECourses Discord Channel Related to AI (Stable Diffusion)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This is kind of announcement sharing. I think the offer looks decent.",
"raw": "This is kind of announcement sharing. I think the offer looks decent.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "For who doesn’t know our channel here : ",
"raw": "For who doesn’t know our channel here : ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://discord.com/servers/software-engineering-courses-secourses-772774097734074388",
"href": "https://discord.com/servers/software-engineering-courses-secourses-772774097734074388",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The job offer is in ai-related-job-offers sub-channel",
"raw": "The job offer is in ai-related-job-offers sub-channel",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | We Got a Job Offer in SECourses Discord Channel Related to AI (Stable Diffusion)
This is kind of announcement sharing. I think the offer looks decent.
For who doesn’t know our channel here : https://discord.com/servers/software-engineering-courses-secourses-772774097734074388
The job offer is in ai-related-job-offers sub-channel | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1672531901326-6345bd89fe134dfd7a0dba40.png",
"fullname": "Furkan Gözükara",
"name": "MonsterMMORPG",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 376,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6345bd89fe134dfd7a0dba40/2mJOUgm4YztZ10A6jXRwU.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"Skrypt"
],
"count": 1
}
] | 2024-07-20T02:17:21.000Z | 2024-07-20T02:17:21.964Z | [] | /posts/MonsterMMORPG/821875507658799 | 764 | 0 |
955927481434060 | [
{
"type": "text",
"value": "I like training LoRAs",
"raw": "I like training LoRAs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/nroggendorff/create-diffusers-dataset",
"href": "https://huggingface.co/blog/nroggendorff/create-diffusers-dataset",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I like training LoRAs
https://huggingface.co/blog/nroggendorff/create-diffusers-dataset | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png",
"fullname": "Noa Roggendorff",
"name": "nroggendorff",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 141,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"Malumatra",
"Blane187",
"merve",
"AtAndDev",
"Nymbo",
"prithivMLmods"
],
"count": 6
},
{
"reaction": "👍",
"users": [
"John6666",
"LeroyDyer",
"merve",
"AtAndDev",
"Nymbo"
],
"count": 5
},
{
"reaction": "😔",
"users": [
"ZeroWw",
"AtAndDev",
"Nymbo"
],
"count": 3
}
] | 2024-07-19T21:12:30.000Z | 2024-07-19T21:12:30.724Z | [] | /posts/nroggendorff/955927481434060 | 2,784 | 0 |
877209328076357 | [
{
"type": "text",
"value": "Websites slam doors on AI data harvesting 🚪🔒",
"raw": "Websites slam doors on AI data harvesting 🚪🔒",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "New study \"Consent in Crisis: The Rapid Decline of the AI Data Commons\" reveals a rapid decline in open web access.",
"raw": "New study \"Consent in Crisis: The Rapid Decline of the AI Data Commons\" reveals a rapid decline in open web access.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Key findings from 14,000 web domains audit:",
"raw": "Key findings from 14,000 web domains audit:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- +5% of three common data sets (C4, RefinedWeb and Dolma) now fully restricted, +25% of the highest-quality sources now fully restricted",
"raw": "- +5% of three common data sets (C4, RefinedWeb and Dolma) now fully restricted, +25% of the highest-quality sources now fully restricted",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- 45% of C4 restricted by Terms of Service",
"raw": "- 45% of C4 restricted by Terms of Service",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Noteworthy trends:",
"raw": "Noteworthy trends:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚫🔄 OpenAI banned 2x more than any other company",
"raw": "🚫🔄 OpenAI banned 2x more than any other company",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📰🔐 News sites leading restrictions: 45% of tokens off-limits",
"raw": "📰🔐 News sites leading restrictions: 45% of tokens off-limits",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Two quotes in the NYT piece to ponder: ",
"raw": "Two quotes in the NYT piece to ponder: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "“Unsurprisingly, we’re seeing blowback from data creators after the text, images and videos they’ve shared online are used to develop commercial systems that sometimes directly threaten their livelihoods.” — ",
"raw": "“Unsurprisingly, we’re seeing blowback from data creators after the text, images and videos they’ve shared online are used to develop commercial systems that sometimes directly threaten their livelihoods.” — ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@yjernite",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "yjernite",
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "“Major tech companies already have all of the data. Changing the license on the data doesn’t retroactively revoke that permission, and the primary impact is on later-arriving actors, who are typically either smaller start-ups or researchers.” — ",
"raw": "“Major tech companies already have all of the data. Changing the license on the data doesn’t retroactively revoke that permission, and the primary impact is on later-arriving actors, who are typically either smaller start-ups or researchers.” — ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@stellaathena",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "stellaathena",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 Dive into the research: ",
"raw": "👉 Dive into the research: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.dataprovenance.org/consent-in-crisis-paper",
"href": "https://www.dataprovenance.org/consent-in-crisis-paper",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 Read the NYT story: ",
"raw": "👉 Read the NYT story: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://www.nytimes.com/2024/07/19/technology/ai-data-restrictions.html",
"href": "https://www.nytimes.com/2024/07/19/technology/ai-data-restrictions.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "#AIEthics #DataPrivacy",
"raw": "#AIEthics #DataPrivacy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Websites slam doors on AI data harvesting 🚪🔒
New study "Consent in Crisis: The Rapid Decline of the AI Data Commons" reveals a rapid decline in open web access.
Key findings from 14,000 web domains audit:
- +5% of three common data sets (C4, RefinedWeb and Dolma) now fully restricted, +25% of the highest-quality sources now fully restricted
- 45% of C4 restricted by Terms of Service
Noteworthy trends:
🚫🔄 OpenAI banned 2x more than any other company
📰🔐 News sites leading restrictions: 45% of tokens off-limits
Two quotes in the NYT piece to ponder:
“Unsurprisingly, we’re seeing blowback from data creators after the text, images and videos they’ve shared online are used to develop commercial systems that sometimes directly threaten their livelihoods.” — @yjernite
“Major tech companies already have all of the data. Changing the license on the data doesn’t retroactively revoke that permission, and the primary impact is on later-arriving actors, who are typically either smaller start-ups or researchers.” — @stellaathena
👉 Dive into the research: https://www.dataprovenance.org/consent-in-crisis-paper
👉 Read the NYT story: https://www.nytimes.com/2024/07/19/technology/ai-data-restrictions.html
#AIEthics #DataPrivacy
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/sXF9rxI8oLGj2erwNKg1o.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/mRIITpTtaHgt6bVYYYA5b.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/60347d3660e3dd96631c9093/B3fuZer5N04tZIAYrLnz4.jpeg",
"fullname": "Stella Biderman",
"name": "stellaathena",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2002
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1594144055859-5ee3a7cd2a3eae3cbdad1305.jpeg",
"fullname": "Yacine Jernite",
"name": "yjernite",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 151
}
] | [
{
"reaction": "🔥",
"users": [
"oneiroid",
"zecerman"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"louisbrulenaudet"
],
"count": 1
}
] | 2024-07-19T20:31:43.000Z | 2024-07-19T20:31:43.344Z | [] | /posts/fdaudens/877209328076357 | 636 | 0 |
874928848909758 | [
{
"type": "text",
"value": "InSPyReNet Background Removal",
"raw": "InSPyReNet Background Removal",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I've built a space for fast background removal. ",
"raw": "I've built a space for fast background removal. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/gokaygokay/Inspyrenet-Rembg",
"href": null,
"resource": {
"type": "space",
"id": "gokaygokay/Inspyrenet-Rembg",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/gokaygokay/Inspyrenet-Rembg",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/plemeri/InSPyReNet",
"href": "https://github.com/plemeri/InSPyReNet",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | InSPyReNet Background Removal
I've built a space for fast background removal.
- https://huggingface.co/spaces/gokaygokay/Inspyrenet-Rembg
- https://github.com/plemeri/InSPyReNet | {
"avatarUrl": "/avatars/b9a6d8e11ec7a62ca2b819e0b6c37222.svg",
"fullname": "gokay aydogan",
"name": "gokaygokay",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1130,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/Tz4KleCHEbpzRjx05PIs_.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/wcDO9JXEMkOgUhFOWpSpy.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/Ql8oPXfRmIwsWRhIANwuJ.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"John6666",
"DonkeySmall",
"den0620",
"Ramikan-BR",
"sxnr1",
"Rsln",
"DIvAndrey",
"Wok",
"jeffcookio",
"MoAusaf"
],
"count": 10
},
{
"reaction": "🚀",
"users": [
"Ramikan-BR"
],
"count": 1
},
{
"reaction": "👀",
"users": [
"Ramikan-BR"
],
"count": 1
},
{
"reaction": "❤️",
"users": [
"Ramikan-BR"
],
"count": 1
},
{
"reaction": "🧠",
"users": [
"Ramikan-BR"
],
"count": 1
}
] | 2024-07-19T18:26:46.000Z | 2024-10-15T04:25:06.600Z | [
{
"avatarUrl": "/avatars/efbc06330c77e2dc37a3bb13e4494c3d.svg",
"fullname": "Sukanth K",
"name": "Sukanth07",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/gokaygokay/874928848909758 | 4,698 | 2 |
756501126284636 | [
{
"type": "text",
"value": "New feature 🔥 ",
"raw": "New feature 🔥 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Image models and LoRAs now have little previews 🤏",
"raw": "Image models and LoRAs now have little previews 🤏",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you don't know where to start to find them, I invite you to browse cool LoRAs in the profile of some amazing fine-tuners: ",
"raw": "If you don't know where to start to find them, I invite you to browse cool LoRAs in the profile of some amazing fine-tuners: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@artificialguybr",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "artificialguybr",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@alvdansen",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "alvdansen",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DoctorDiffusion",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DoctorDiffusion",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@e-n-v-y",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "e-n-v-y",
"label": null,
"lang": null
},
{
"type": "text",
"value": ", ",
"raw": ", ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@KappaNeuro",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "KappaNeuro",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ostris",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ostris",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | New feature 🔥
Image models and LoRAs now have little previews 🤏
If you don't know where to start to find them, I invite you to browse cool LoRAs in the profile of some amazing fine-tuners: @artificialguybr, @alvdansen, @DoctorDiffusion, @e-n-v-y, @KappaNeuro @ostris | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1649143001781-624bebf604abc7ebb01789af.jpeg",
"fullname": "Apolinário from multimodal AI art",
"name": "multimodalart",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 3177,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/624bebf604abc7ebb01789af/M8DURRNPeT0k-35_hJzNq.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/635dd6cd4fabde0df74aeae6/23c0uEOr7RWDtSLDBzkPD.png",
"fullname": "araminta_k",
"name": "alvdansen",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 496
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6304037b7373aacccd882e1e/H8M3e1n0CpJr5n3aL0ExE.jpeg",
"fullname": "ArtificialGuy/JV.K",
"name": "artificialguybr",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2316
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6303ca79d14428368d1821d7/CHzkdI_0y2xJCPU1TDIsO.jpeg",
"fullname": "Joseph Kachnic",
"name": "DoctorDiffusion",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 22
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6303c4880907b9a115c36ce4/2dAYt5dDeKYuJwdY2CYF5.png",
"fullname": "_Envy_",
"name": "e-n-v-y",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 51
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630776eadc6b7663aa95d0e5/LcNvLR6MgB5YzhpNEs6aX.jpeg",
"fullname": "Neuro_Kappa",
"name": "KappaNeuro",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 57
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/643cb43e6eeb746f5ad81c26/_DUtzHpNtpTeDw7u0oYyX.png",
"fullname": "Jaret Burkett",
"name": "ostris",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 263
}
] | [
{
"reaction": "❤️",
"users": [
"GPT007",
"osanseviero",
"ayush7",
"John6666",
"Blane187",
"bghira",
"artificialguybr",
"Rsln",
"edpresque",
"DoctorDiffusion",
"cbensimon",
"OmbelineM"
],
"count": 12
}
] | 2024-07-19T15:16:14.000Z | 2024-09-18T07:56:27.104Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641caf6c043963b1c0a27256/CD7ktICDsldVJlpiND5kl.png",
"fullname": "PseudoTerminal X",
"name": "bghira",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 103,
"isFollowing": false
},
{
"avatarUrl": "/avatars/8473d30b909208e7dd5828620bcb4ce1.svg",
"fullname": "Wallow",
"name": "Viktor1233",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
}
] | /posts/multimodalart/756501126284636 | 16,329 | 2 |
384170201249509 | [
{
"type": "text",
"value": "Chameleon 🦎 by Meta is now available in Hugging Face transformers 😍",
"raw": "Chameleon 🦎 by Meta is now available in Hugging Face transformers 😍",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "A vision language model that comes in 7B and 34B sizes 🤩",
"raw": "A vision language model that comes in 7B and 34B sizes 🤩",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But what makes this model so special? ",
"raw": "But what makes this model so special? ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Demo: ",
"raw": "Demo: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/merve/chameleon-7b",
"href": null,
"resource": {
"type": "space",
"id": "merve/chameleon-7b",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/merve/chameleon-7b",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Models: ",
"raw": "Models: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/facebook/chameleon-668da9663f80d483b4c61f58",
"href": null,
"resource": {
"type": "collection",
"id": "facebook/chameleon-668da9663f80d483b4c61f58",
"discussionNum": null
},
"url": "https://huggingface.co/collections/facebook/chameleon-668da9663f80d483b4c61f58",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "keep reading ⥥",
"raw": "keep reading ⥥",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Chameleon is a unique model: it attempts to scale early fusion 🤨",
"raw": "Chameleon is a unique model: it attempts to scale early fusion 🤨",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "But what is early fusion?",
"raw": "But what is early fusion?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Modern vision language models use a vision encoder with a projection layer to project image embeddings so it can be promptable to text decoder (LLM)",
"raw": "Modern vision language models use a vision encoder with a projection layer to project image embeddings so it can be promptable to text decoder (LLM)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Early fusion on the other hand attempts to fuse all features together (image patches and text) by using an image tokenizer and all tokens are projected into a shared space, which enables seamless generation 😏 ",
"raw": "Early fusion on the other hand attempts to fuse all features together (image patches and text) by using an image tokenizer and all tokens are projected into a shared space, which enables seamless generation 😏 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Authors have also introduced different architectural improvements (QK norm and revise placement of layer norms) for scalable and stable training and they were able to increase the token count (5x tokens compared to Llama 3 which is a must with early-fusion IMO)",
"raw": "Authors have also introduced different architectural improvements (QK norm and revise placement of layer norms) for scalable and stable training and they were able to increase the token count (5x tokens compared to Llama 3 which is a must with early-fusion IMO)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This model is an any-to-any model thanks to early fusion: it can take image and text input and output image and text, but image generation are disabled to prevent malicious use. ",
"raw": "This model is an any-to-any model thanks to early fusion: it can take image and text input and output image and text, but image generation are disabled to prevent malicious use. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "One can also do text-only prompting, authors noted the model catches up with larger LLMs (like Mixtral 8x7B or larger Llama-2 70B) and also image-pair prompting with larger VLMs like IDEFICS2-80B (see paper for the benchmarks ",
"raw": "One can also do text-only prompting, authors noted the model catches up with larger LLMs (like Mixtral 8x7B or larger Llama-2 70B) and also image-pair prompting with larger VLMs like IDEFICS2-80B (see paper for the benchmarks ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/papers/2405.09818",
"href": null,
"resource": {
"type": "paper",
"id": "2405.09818",
"discussionNum": null
},
"url": "https://huggingface.co/papers/2405.09818",
"code": null,
"user": null,
"label": "Chameleon: Mixed-Modal Early-Fusion Foundation Models (2405.09818)",
"lang": null
},
{
"type": "text",
"value": ")",
"raw": ")",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thanks for reading!",
"raw": "Thanks for reading!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Chameleon 🦎 by Meta is now available in Hugging Face transformers 😍
A vision language model that comes in 7B and 34B sizes 🤩
But what makes this model so special?
Demo: https://huggingface.co/spaces/merve/chameleon-7b
Models: https://huggingface.co/collections/facebook/chameleon-668da9663f80d483b4c61f58
keep reading ⥥
Chameleon is a unique model: it attempts to scale early fusion 🤨
But what is early fusion?
Modern vision language models use a vision encoder with a projection layer to project image embeddings so it can be promptable to text decoder (LLM)
Early fusion on the other hand attempts to fuse all features together (image patches and text) by using an image tokenizer and all tokens are projected into a shared space, which enables seamless generation 😏
Authors have also introduced different architectural improvements (QK norm and revise placement of layer norms) for scalable and stable training and they were able to increase the token count (5x tokens compared to Llama 3 which is a must with early-fusion IMO)
This model is an any-to-any model thanks to early fusion: it can take image and text input and output image and text, but image generation are disabled to prevent malicious use.
One can also do text-only prompting, authors noted the model catches up with larger LLMs (like Mixtral 8x7B or larger Llama-2 70B) and also image-pair prompting with larger VLMs like IDEFICS2-80B (see paper for the benchmarks https://huggingface.co/papers/2405.09818)
Thanks for reading! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6141a88b3a0ec78603c9e784/avWtD9eu8PzFpZ8_PnTDD.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"sumandas",
"atahanuz",
"Nymbo",
"ucsahin",
"GPT007",
"fdaudens",
"osanseviero",
"alkzar90",
"dblasko",
"Svngoku",
"gokaygokay",
"mikestaub",
"abdulbasit-nubytes"
],
"count": 13
},
{
"reaction": "🤗",
"users": [
"alanonbing"
],
"count": 1
}
] | 2024-07-19T12:46:01.000Z | 2024-07-26T12:13:07.517Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1648113222875-6141a88b3a0ec78603c9e784.png",
"fullname": "Merve Noyan",
"name": "merve",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 5589,
"isFollowing": false
},
{
"avatarUrl": "/avatars/cfc399a521b05df08b799595b0390d13.svg",
"fullname": "Prasanna Iyer",
"name": "prasiyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/667e763c6f5dee59721f016d/V2e6Z0BjzXE3t_obD3JXS.jpeg",
"fullname": "Anastasia",
"name": "Ana111op",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/merve/384170201249509 | 3,313 | 8 |
723320133467359 | [
{
"type": "text",
"value": "Sparse MoE (SMoE) has an unavoidable drawback: the performance of SMoE heavily relies on the choice of hyper-parameters, such as the number of activated experts per token (top-k) and the number of experts.",
"raw": "Sparse MoE (SMoE) has an unavoidable drawback: the performance of SMoE heavily relies on the choice of hyper-parameters, such as the number of activated experts per token (top-k) and the number of experts.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Also, identifying the optimal hyper-parameter without a sufficient number of ablation studies is challenging. As the size of the models continues to grow, this limitation could result in a significant waste of computational resources, and in turn, could hinder the efficiency of training MoE-based models in practice.",
"raw": "Also, identifying the optimal hyper-parameter without a sufficient number of ablation studies is challenging. As the size of the models continues to grow, this limitation could result in a significant waste of computational resources, and in turn, could hinder the efficiency of training MoE-based models in practice.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "(READ MORE ↓↓↓) Now, our DynMoE addresses these challenges! 🙌 DynMoE incorporates: ",
"raw": "(READ MORE ↓↓↓) Now, our DynMoE addresses these challenges! 🙌 DynMoE incorporates: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "(1) a novel gating method that enables each token to automatically determine the number of experts to activate. ",
"raw": "(1) a novel gating method that enables each token to automatically determine the number of experts to activate. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "(2) An adaptive process automatically adjusts the number of experts during training. Extensive numerical results across Vision, Language, and Vision-Language tasks demonstrate the effectiveness of our approach to achieve competitive performance compared to GMoE for vision and language tasks, and MoE-LLaVA for vision-language tasks, while maintaining efficiency by activating fewer parameters.",
"raw": "(2) An adaptive process automatically adjusts the number of experts during training. Extensive numerical results across Vision, Language, and Vision-Language tasks demonstrate the effectiveness of our approach to achieve competitive performance compared to GMoE for vision and language tasks, and MoE-LLaVA for vision-language tasks, while maintaining efficiency by activating fewer parameters.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Our code is available at ",
"raw": "Our code is available at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/LINs-lab/DynMoE",
"href": "https://github.com/LINs-lab/DynMoE",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ", also see the checkpoints at ",
"raw": ", also see the checkpoints at ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/LINs-lab/dynmoe-family-665ed5a331a7e84463cab01a",
"href": null,
"resource": {
"type": "collection",
"id": "LINs-lab/dynmoe-family-665ed5a331a7e84463cab01a",
"discussionNum": null
},
"url": "https://huggingface.co/collections/LINs-lab/dynmoe-family-665ed5a331a7e84463cab01a",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Sparse MoE (SMoE) has an unavoidable drawback: the performance of SMoE heavily relies on the choice of hyper-parameters, such as the number of activated experts per token (top-k) and the number of experts.
Also, identifying the optimal hyper-parameter without a sufficient number of ablation studies is challenging. As the size of the models continues to grow, this limitation could result in a significant waste of computational resources, and in turn, could hinder the efficiency of training MoE-based models in practice.
(READ MORE ↓↓↓) Now, our DynMoE addresses these challenges! 🙌 DynMoE incorporates:
(1) a novel gating method that enables each token to automatically determine the number of experts to activate.
(2) An adaptive process automatically adjusts the number of experts during training. Extensive numerical results across Vision, Language, and Vision-Language tasks demonstrate the effectiveness of our approach to achieve competitive performance compared to GMoE for vision and language tasks, and MoE-LLaVA for vision-language tasks, while maintaining efficiency by activating fewer parameters.
Our code is available at https://github.com/LINs-lab/DynMoE, also see the checkpoints at https://huggingface.co/collections/LINs-lab/dynmoe-family-665ed5a331a7e84463cab01a
| {
"avatarUrl": "/avatars/86a748a3264e6e0f4ee5eaf8f7032ecb.svg",
"fullname": "Zhenglin Cheng",
"name": "kenshinn",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 11,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65028e8389707f182386588c/QQbD6VPiRldWdPbwo_IIy.jpeg"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"surajkachate123",
"GPT007",
"osanseviero",
"Ramikan-BR",
"hllj",
"danielus",
"flflow",
"kenshinn",
"Yongxin-Guo"
],
"count": 9
},
{
"reaction": "🚀",
"users": [
"GPT007",
"kenshinn",
"Yongxin-Guo"
],
"count": 3
}
] | 2024-07-19T11:34:57.000Z | 2024-07-19T14:04:36.451Z | [] | /posts/kenshinn/723320133467359 | 2,023 | 0 |
142040830676956 | [
{
"type": "text",
"value": "Since it is release season, at PleIAs we announce our first suite of specialized language models for document processing tasks (OCR correction, text segmentation, bibliographic extraction) and the release of the largest multimodal dataset of financial document Finance Commons: ",
"raw": "Since it is release season, at PleIAs we announce our first suite of specialized language models for document processing tasks (OCR correction, text segmentation, bibliographic extraction) and the release of the largest multimodal dataset of financial document Finance Commons: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/Pclanglais/finance-commons-bad-data-toolbox",
"href": "https://huggingface.co/blog/Pclanglais/finance-commons-bad-data-toolbox",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "LLM research is currently focused on quality data. We went on the opposite direction and voluntarily trained models on bad data. Far from degrading models, it made them more resilient to text sources commonly used in production.",
"raw": "LLM research is currently focused on quality data. We went on the opposite direction and voluntarily trained models on bad data. Far from degrading models, it made them more resilient to text sources commonly used in production.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Having a wider range of real life data proved critical for this project. A few months after the release of Common Corpus, we expanded our pool of \"training data commons\" with a major multimodal ressource: document released as open financial data. Finance commons comprises 17 billion tokens and 1.25 PDF corporate documents released by the SEC, WTO, AMF, EU Tenders In a multiple languages with a large variety of document layouts and challenging sources to train more robust models.",
"raw": "Having a wider range of real life data proved critical for this project. A few months after the release of Common Corpus, we expanded our pool of \"training data commons\" with a major multimodal ressource: document released as open financial data. Finance commons comprises 17 billion tokens and 1.25 PDF corporate documents released by the SEC, WTO, AMF, EU Tenders In a multiple languages with a large variety of document layouts and challenging sources to train more robust models.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "With HuggingFace compute support, we release an entire pipeline to process bad data sources and make them usable in production for LLMOps or simply retrieval: ",
"raw": "With HuggingFace compute support, we release an entire pipeline to process bad data sources and make them usable in production for LLMOps or simply retrieval: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/PleIAs/PleIAs-Editor",
"href": null,
"resource": {
"type": "space",
"id": "PleIAs/PleIAs-Editor",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/PleIAs/PleIAs-Editor",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "This approach is based on our new series of specialized models for document processing, the \"bad data toolbox\" comprising:",
"raw": "This approach is based on our new series of specialized models for document processing, the \"bad data toolbox\" comprising:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "*OCRonos, the best available model to date for OCR correction. ",
"raw": "*OCRonos, the best available model to date for OCR correction. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/PleIAs/OCRonos",
"href": null,
"resource": {
"type": "model",
"id": "PleIAs/OCRonos",
"discussionNum": null
},
"url": "https://huggingface.co/PleIAs/OCRonos",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "*Segmentext, a pure semantic small model for text segmentation, working without any visual reference. ",
"raw": "*Segmentext, a pure semantic small model for text segmentation, working without any visual reference. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/PleIAs/Segmentext",
"href": null,
"resource": {
"type": "model",
"id": "PleIAs/Segmentext",
"discussionNum": null
},
"url": "https://huggingface.co/PleIAs/Segmentext",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "*Bibtexer, a small model for bibliographic data extraction acting as a \"reversed-Zotero.\" ",
"raw": "*Bibtexer, a small model for bibliographic data extraction acting as a \"reversed-Zotero.\" ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/PleIAs/BibTexer",
"href": null,
"resource": {
"type": "model",
"id": "PleIAs/BibTexer",
"discussionNum": null
},
"url": "https://huggingface.co/PleIAs/BibTexer",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Since it is release season, at PleIAs we announce our first suite of specialized language models for document processing tasks (OCR correction, text segmentation, bibliographic extraction) and the release of the largest multimodal dataset of financial document Finance Commons: https://huggingface.co/blog/Pclanglais/finance-commons-bad-data-toolbox
LLM research is currently focused on quality data. We went on the opposite direction and voluntarily trained models on bad data. Far from degrading models, it made them more resilient to text sources commonly used in production.
Having a wider range of real life data proved critical for this project. A few months after the release of Common Corpus, we expanded our pool of "training data commons" with a major multimodal ressource: document released as open financial data. Finance commons comprises 17 billion tokens and 1.25 PDF corporate documents released by the SEC, WTO, AMF, EU Tenders In a multiple languages with a large variety of document layouts and challenging sources to train more robust models.
With HuggingFace compute support, we release an entire pipeline to process bad data sources and make them usable in production for LLMOps or simply retrieval: https://huggingface.co/spaces/PleIAs/PleIAs-Editor
This approach is based on our new series of specialized models for document processing, the "bad data toolbox" comprising:
*OCRonos, the best available model to date for OCR correction. https://huggingface.co/PleIAs/OCRonos
*Segmentext, a pure semantic small model for text segmentation, working without any visual reference. https://huggingface.co/PleIAs/Segmentext
*Bibtexer, a small model for bibliographic data extraction acting as a "reversed-Zotero." https://huggingface.co/PleIAs/BibTexer | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64ce091a9e9ca8123d7a42b0/OEPggp82RwigxNLL35LgT.jpeg",
"fullname": "Pierre-Carl Langlais",
"name": "Pclanglais",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🤝",
"users": [
"Tonic",
"louisbrulenaudet"
],
"count": 2
}
] | 2024-07-19T10:40:32.000Z | 2024-07-19T11:14:47.169Z | [] | /posts/Pclanglais/142040830676956 | 1,090 | 0 |
823469201126335 | [
{
"type": "text",
"value": "Since new TTS (Text-to-Speech) systems are coming out what feels like every day, and it's currently hard to compare them, my latest project has focused on doing just that.",
"raw": "Since new TTS (Text-to-Speech) systems are coming out what feels like every day, and it's currently hard to compare them, my latest project has focused on doing just that.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I was inspired by the ",
"raw": "I was inspired by the ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/TTS-AGI/TTS-Arena",
"href": null,
"resource": {
"type": "space",
"id": "TTS-AGI/TTS-Arena",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/TTS-AGI/TTS-Arena",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " (definitely check it out if you haven't), which compares recent TTS system using crowdsourced A/B testing.",
"raw": " (definitely check it out if you haven't), which compares recent TTS system using crowdsourced A/B testing.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I wanted to see if we can also do a similar evaluation with objective metrics and it's now available here:",
"raw": "I wanted to see if we can also do a similar evaluation with objective metrics and it's now available here:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/ttsds/benchmark",
"href": null,
"resource": {
"type": "space",
"id": "ttsds/benchmark",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/ttsds/benchmark",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Anyone can submit a new TTS model, and I hope this can provide a way to get some information on which areas models perform well or poorly in.",
"raw": "Anyone can submit a new TTS model, and I hope this can provide a way to get some information on which areas models perform well or poorly in.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The paper with all the details is available here: ",
"raw": "The paper with all the details is available here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://arxiv.org/abs/2407.12707",
"href": "https://arxiv.org/abs/2407.12707",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Since new TTS (Text-to-Speech) systems are coming out what feels like every day, and it's currently hard to compare them, my latest project has focused on doing just that.
I was inspired by the https://huggingface.co/spaces/TTS-AGI/TTS-Arena (definitely check it out if you haven't), which compares recent TTS system using crowdsourced A/B testing.
I wanted to see if we can also do a similar evaluation with objective metrics and it's now available here:
https://huggingface.co/spaces/ttsds/benchmark
Anyone can submit a new TTS model, and I hope this can provide a way to get some information on which areas models perform well or poorly in.
The paper with all the details is available here: https://arxiv.org/abs/2407.12707 | {
"avatarUrl": "/avatars/fa1151e05cc22698a75a48134aa38dbc.svg",
"fullname": "Christoph Minixhofer",
"name": "cdminix",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 7,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"ZeroWw",
"osanseviero",
"John6666",
"Gatozu35",
"ayymen",
"Ramikan-BR",
"Dihelson",
"MarinaraSpaghetti",
"mrfakename",
"Korakoe",
"kramp"
],
"count": 11
},
{
"reaction": "🚀",
"users": [
"krushnabhosle",
"Gatozu35",
"Ramikan-BR",
"Dihelson",
"Korakoe"
],
"count": 5
},
{
"reaction": "👀",
"users": [
"Ramikan-BR",
"Dihelson"
],
"count": 2
}
] | 2024-07-18T22:16:51.000Z | 2024-07-23T12:06:49.283Z | [] | /posts/cdminix/823469201126335 | 2,213 | 0 |
456337104023610 | [
{
"type": "text",
"value": "Thrilled to share some AI insights for journalism! 📊🤖",
"raw": "Thrilled to share some AI insights for journalism! 📊🤖",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Just wrote a guest post on ",
"raw": "Just wrote a guest post on ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@ndiakopoulos",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "ndiakopoulos",
"label": null,
"lang": null
},
{
"type": "text",
"value": "'s blog about Hugging Face on Sheets tool.",
"raw": "'s blog about Hugging Face on Sheets tool.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Why it matters:",
"raw": "Why it matters:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔌 Brings AI power directly to spreadsheets",
"raw": "🔌 Brings AI power directly to spreadsheets",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📈 Huge potential for data journalism",
"raw": "📈 Huge potential for data journalism",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚫💻 No coding required!",
"raw": "🚫💻 No coding required!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "If you've read Nicholas' \"Automating the News\" (a must-read!), you'll appreciate how this tool fits into the evolving landscape of AI in journalism.",
"raw": "If you've read Nicholas' \"Automating the News\" (a must-read!), you'll appreciate how this tool fits into the evolving landscape of AI in journalism.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Read here: ",
"raw": "Read here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://generative-ai-newsroom.com/bringing-open-source-models-to-spreadsheets-c440fc4818b4",
"href": "https://generative-ai-newsroom.com/bringing-open-source-models-to-spreadsheets-c440fc4818b4",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "#AIJournalism #DataJournalism",
"raw": "#AIJournalism #DataJournalism",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Thrilled to share some AI insights for journalism! 📊🤖
Just wrote a guest post on @ndiakopoulos's blog about Hugging Face on Sheets tool.
Why it matters:
🔌 Brings AI power directly to spreadsheets
📈 Huge potential for data journalism
🚫💻 No coding required!
If you've read Nicholas' "Automating the News" (a must-read!), you'll appreciate how this tool fits into the evolving landscape of AI in journalism.
Read here: https://generative-ai-newsroom.com/bringing-open-source-models-to-spreadsheets-c440fc4818b4
#AIJournalism #DataJournalism | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/647f36a8454af0237bd49574/hOmv8IAzhHKKKWkhaajqA.webp"
}
] | [
{
"avatarUrl": "/avatars/134290a59822bff8cada0c48111de464.svg",
"fullname": "Nick",
"name": "ndiakopoulos",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1
}
] | [] | 2024-07-18T20:07:06.000Z | 2024-08-26T16:42:25.197Z | [
{
"avatarUrl": "/avatars/f28a63f322bb4f87c0ecd41cd05002f5.svg",
"fullname": "Brent Bolleman",
"name": "bolleman",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/fdaudens/456337104023610 | 710 | 1 |
685609706603289 | [
{
"type": "text",
"value": "🇷🇸 New Benchmark for Serbian Language 🇷🇸",
"raw": "🇷🇸 New Benchmark for Serbian Language 🇷🇸",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@DjMel",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "DjMel",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and I recently released a new benchmark for Serbian language that measures General Knowledge of LLMs. We had to parse over 20 years of university entrance exams for University of Belgrade, so the dataset is of high quality.",
"raw": " and I recently released a new benchmark for Serbian language that measures General Knowledge of LLMs. We had to parse over 20 years of university entrance exams for University of Belgrade, so the dataset is of high quality.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🥇 OAI models still hold the podium places with a significant gap compared to open-source models",
"raw": "🥇 OAI models still hold the podium places with a significant gap compared to open-source models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤔 ",
"raw": "🤔 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Qwen/Qwen2-7B-Instruct",
"href": null,
"resource": {
"type": "model",
"id": "Qwen/Qwen2-7B-Instruct",
"discussionNum": null
},
"url": "https://huggingface.co/Qwen/Qwen2-7B-Instruct",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct",
"href": null,
"resource": {
"type": "model",
"id": "VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct",
"discussionNum": null
},
"url": "https://huggingface.co/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " models show promising results considering they weren't trained on Serbian language",
"raw": " models show promising results considering they weren't trained on Serbian language",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📈 Best open-source model seems to be ",
"raw": "📈 Best open-source model seems to be ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/Stopwolf/Mustra-7B-Instruct-v0.2",
"href": null,
"resource": {
"type": "model",
"id": "Stopwolf/Mustra-7B-Instruct-v0.2",
"discussionNum": null
},
"url": "https://huggingface.co/Stopwolf/Mustra-7B-Instruct-v0.2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ", a merge between ",
"raw": ", a merge between ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/gordicaleksa/YugoGPT",
"href": null,
"resource": {
"type": "model",
"id": "gordicaleksa/YugoGPT",
"discussionNum": null
},
"url": "https://huggingface.co/gordicaleksa/YugoGPT",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " and ",
"raw": " and ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
"href": null,
"resource": {
"type": "model",
"id": "mistralai/Mistral-7B-Instruct-v0.2",
"discussionNum": null
},
"url": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📉 Some models like ",
"raw": "📉 Some models like ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/google/gemma-2-9b-it",
"href": null,
"resource": {
"type": "model",
"id": "google/gemma-2-9b-it",
"discussionNum": null
},
"url": "https://huggingface.co/google/gemma-2-9b-it",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " turned out to be a disappointment with random guessing-like accuracy",
"raw": " turned out to be a disappointment with random guessing-like accuracy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Take a look at the whole results at the dataset page:",
"raw": "Take a look at the whole results at the dataset page:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/DjMel/oz-eval",
"href": null,
"resource": {
"type": "dataset",
"id": "DjMel/oz-eval",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/DjMel/oz-eval",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "P.S. If you have any constructive criticism or ideas for improvement, feel free to use dataset's Discussions page!",
"raw": "P.S. If you have any constructive criticism or ideas for improvement, feel free to use dataset's Discussions page!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🇷🇸 New Benchmark for Serbian Language 🇷🇸
@DjMel and I recently released a new benchmark for Serbian language that measures General Knowledge of LLMs. We had to parse over 20 years of university entrance exams for University of Belgrade, so the dataset is of high quality.
🥇 OAI models still hold the podium places with a significant gap compared to open-source models
🤔 https://huggingface.co/Qwen/Qwen2-7B-Instruct and https://huggingface.co/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct models show promising results considering they weren't trained on Serbian language
📈 Best open-source model seems to be https://huggingface.co/Stopwolf/Mustra-7B-Instruct-v0.2, a merge between https://huggingface.co/gordicaleksa/YugoGPT and https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
📉 Some models like https://huggingface.co/google/gemma-2-9b-it turned out to be a disappointment with random guessing-like accuracy
Take a look at the whole results at the dataset page:
https://huggingface.co/datasets/DjMel/oz-eval
P.S. If you have any constructive criticism or ideas for improvement, feel free to use dataset's Discussions page! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64d9eca170891ac9b8d9fd38/hro6Ib5hFWWNBEu9J4XMe.png",
"fullname": "Sinisa Stanivuk",
"name": "Stopwolf",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 16,
"isFollowing": false
} | [] | [
{
"avatarUrl": "/avatars/162f11f3a0784e964e634f0826b4843e.svg",
"fullname": "Milena",
"name": "DjMel",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4
}
] | [
{
"reaction": "🚀",
"users": [
"DjMel",
"Ramikan-BR",
"intellya22"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"monsoon-nlp",
"Ramikan-BR"
],
"count": 2
}
] | 2024-07-18T17:52:12.000Z | 2024-07-18T17:52:12.855Z | [] | /posts/Stopwolf/685609706603289 | 1,051 | 0 |
530713412165384 | [
{
"type": "text",
"value": "This is the week of small AI language models!",
"raw": "This is the week of small AI language models!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | This is the week of small AI language models! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1763,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👍",
"users": [
"ZeroWw",
"rumbleFTW",
"ZachZimm",
"osanseviero",
"GPT007",
"chenduo",
"jpacifico",
"thesven",
"AtAndDev",
"Fredithefish"
],
"count": 10
},
{
"reaction": "🤝",
"users": [
"Bharath182004",
"YaTharThShaRma999",
"rumbleFTW",
"osanseviero",
"GPT007",
"jessicagab",
"AtAndDev",
"louisbrulenaudet",
"Fredithefish"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"jeffboudier",
"AtAndDev"
],
"count": 2
}
] | 2024-07-18T15:44:06.000Z | 2024-07-20T15:36:06.401Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/641caf6c043963b1c0a27256/CD7ktICDsldVJlpiND5kl.png",
"fullname": "PseudoTerminal X",
"name": "bghira",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 103,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/630f3e4002ce39336c411048/FXJON7b-aRUiH0_V2uRsi.jpeg",
"fullname": "alkinun",
"name": "AtAndDev",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 19,
"isFollowing": false
},
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
}
] | /posts/clem/530713412165384 | 2,497 | 4 |
607934507622585 | [
{
"type": "text",
"value": "🚀 Just released version 0.24.0 of the 𝚑𝚞𝚐𝚐𝚒𝚗𝚐𝚏𝚊𝚌𝚎_𝚑𝚞𝚋 Python library!",
"raw": "🚀 Just released version 0.24.0 of the 𝚑𝚞𝚐𝚐𝚒𝚗𝚐𝚏𝚊𝚌𝚎_𝚑𝚞𝚋 Python library!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Exciting updates include:",
"raw": "Exciting updates include:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "⚡ InferenceClient is now a drop-in replacement for OpenAI's chat completion!",
"raw": "⚡ InferenceClient is now a drop-in replacement for OpenAI's chat completion!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✨ Support for response_format, adapter_id , truncate, and more in InferenceClient ",
"raw": "✨ Support for response_format, adapter_id , truncate, and more in InferenceClient ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💾 Serialization module with a save_torch_model helper that handles shared layers, sharding, naming convention, and safe serialization. Basically a condensed version of logic scattered across safetensors, transformers , accelerate",
"raw": "💾 Serialization module with a save_torch_model helper that handles shared layers, sharding, naming convention, and safe serialization. Basically a condensed version of logic scattered across safetensors, transformers , accelerate",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📁 Optimized HfFileSystem to avoid getting rate limited when browsing ",
"raw": "📁 Optimized HfFileSystem to avoid getting rate limited when browsing ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/datasets/HuggingFaceFW/fineweb",
"href": null,
"resource": {
"type": "dataset",
"id": "HuggingFaceFW/fineweb",
"discussionNum": null
},
"url": "https://huggingface.co/datasets/HuggingFaceFW/fineweb",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔨 HfApi & CLI improvements: prevent empty commits, create repo inside resource group, webhooks API, more options in the Search API, etc.",
"raw": "🔨 HfApi & CLI improvements: prevent empty commits, create repo inside resource group, webhooks API, more options in the Search API, etc.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out the full release notes for more details: ",
"raw": "Check out the full release notes for more details: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/7",
"href": null,
"resource": {
"type": "space",
"id": "Wauplin/huggingface_hub",
"discussionNum": 7
},
"url": "https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/7",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " 👀",
"raw": " 👀",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🚀 Just released version 0.24.0 of the 𝚑𝚞𝚐𝚐𝚒𝚗𝚐𝚏𝚊𝚌𝚎_𝚑𝚞𝚋 Python library!
Exciting updates include:
⚡ InferenceClient is now a drop-in replacement for OpenAI's chat completion!
✨ Support for response_format, adapter_id , truncate, and more in InferenceClient
💾 Serialization module with a save_torch_model helper that handles shared layers, sharding, naming convention, and safe serialization. Basically a condensed version of logic scattered across safetensors, transformers , accelerate
📁 Optimized HfFileSystem to avoid getting rate limited when browsing https://huggingface.co/datasets/HuggingFaceFW/fineweb
🔨 HfApi & CLI improvements: prevent empty commits, create repo inside resource group, webhooks API, more options in the Search API, etc.
Check out the full release notes for more details:
https://huggingface.co/spaces/Wauplin/huggingface_hub/discussions/7
👀 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659336880158-6273f303f6d63a28483fde12.png",
"fullname": "Lucain Pouget",
"name": "Wauplin",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 157,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"lhoestq",
"plaguss",
"clem",
"Nymbo",
"not-lain",
"Goekdeniz-Guelmez"
],
"count": 6
},
{
"reaction": "👍",
"users": [
"Tonic",
"Nymbo",
"not-lain",
"m-ric"
],
"count": 4
}
] | 2024-07-18T13:33:40.000Z | 2024-07-29T07:47:59.935Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1659336880158-6273f303f6d63a28483fde12.png",
"fullname": "Lucain Pouget",
"name": "Wauplin",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 157,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63d10d4e8eaa4831005e92b5/7p7-OmWM6PqqCs7ZStPGD.jpeg",
"fullname": "Aymeric Roucher",
"name": "m-ric",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 494,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6527e89a8808d80ccff88b7a/CuGNmF1Et8KMQ0mCd1NEJ.jpeg",
"fullname": "Lain",
"name": "not-lain",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 941,
"isFollowing": false
},
{
"avatarUrl": "/avatars/35763ffaeceffdde96ae61ab84f29e62.svg",
"fullname": "Benjamin Gabriel",
"name": "Benjamingabriel12",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/Wauplin/607934507622585 | 1,979 | 6 |
575393196953115 | [
{
"type": "text",
"value": "I have invented a method that is better than Diffusion. A company got a billion dollar valuation yesterday for less than what I am currently giving the world for free. I am starting to suspect that is the issue, I am giving it away for free. I meant it to be a gift to the world, but no one will even look at it. I am changing the licensing soon. It will no longer be free. At the moment, you can view exactly how Swarm Neural Networks can do everything Reverse Diffusion can do, for far less money. It can even make API calls.",
"raw": "I have invented a method that is better than Diffusion. A company got a billion dollar valuation yesterday for less than what I am currently giving the world for free. I am starting to suspect that is the issue, I am giving it away for free. I meant it to be a gift to the world, but no one will even look at it. I am changing the licensing soon. It will no longer be free. At the moment, you can view exactly how Swarm Neural Networks can do everything Reverse Diffusion can do, for far less money. It can even make API calls.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "SNN Image Generator: ",
"raw": "SNN Image Generator: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/TuringsSolutions/SNN-Image-Generator",
"href": null,
"resource": {
"type": "space",
"id": "TuringsSolutions/SNN-Image-Generator",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/TuringsSolutions/SNN-Image-Generator",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "SNN Function Caller (Controlled By TinyLlama): ",
"raw": "SNN Function Caller (Controlled By TinyLlama): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/TuringsSolutions/Qwen-2.0.5B-Swarm-Function-Caller",
"href": null,
"resource": {
"type": "space",
"id": "TuringsSolutions/Qwen-2.0.5B-Swarm-Function-Caller",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/TuringsSolutions/Qwen-2.0.5B-Swarm-Function-Caller",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I have invented a method that is better than Diffusion. A company got a billion dollar valuation yesterday for less than what I am currently giving the world for free. I am starting to suspect that is the issue, I am giving it away for free. I meant it to be a gift to the world, but no one will even look at it. I am changing the licensing soon. It will no longer be free. At the moment, you can view exactly how Swarm Neural Networks can do everything Reverse Diffusion can do, for far less money. It can even make API calls.
SNN Image Generator: https://huggingface.co/spaces/TuringsSolutions/SNN-Image-Generator
SNN Function Caller (Controlled By TinyLlama): https://huggingface.co/spaces/TuringsSolutions/Qwen-2.0.5B-Swarm-Function-Caller
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"victor",
"Augustindroid",
"LeroyDyer",
"Kkordik",
"den0620",
"Joseph717171"
],
"count": 6
},
{
"reaction": "😎",
"users": [
"LeroyDyer",
"GPT007"
],
"count": 2
}
] | 2024-07-18T12:40:43.000Z | 2024-07-20T16:15:03.586Z | [
{
"avatarUrl": "/avatars/ea4398745974d781ae9dc0e95b12cabe.svg",
"fullname": "Joseph",
"name": "Joseph717171",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 22,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/cA64Ix1vh75C7HoClUBhx.png",
"fullname": "Richard A Aragon",
"name": "TuringsSolutions",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 146,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
"fullname": "leroy Samuel Dyer",
"name": "LeroyDyer",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 84,
"isFollowing": false
},
{
"avatarUrl": "/avatars/aa32e9ef969746fe54c6817b161f47a9.svg",
"fullname": "Choms",
"name": "Choms",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/c82779fdf94f80cdb5020504f83c818b.svg",
"fullname": "Yatharth Sharma",
"name": "YaTharThShaRma999",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 14,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63f4fcd871a5d395c71dc34e/ej2xshmjs3RvSNU9dHPz7.jpeg",
"fullname": "Maks",
"name": "Kkordik",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 4,
"isFollowing": false
},
{
"avatarUrl": "/avatars/7a3f022fd7a77157e1bf16dc33f42316.svg",
"fullname": "Brian ",
"name": "Rusvasul",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
}
] | /posts/TuringsSolutions/575393196953115 | 1,388 | 25 |
926337297827024 | [
{
"type": "text",
"value": "# Offensive Physical Security Reconnaissance Planning Automation with public facing RTSP streams and Moondream",
"raw": "# Offensive Physical Security Reconnaissance Planning Automation with public facing RTSP streams and Moondream",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "After some late night casual hacking about on VLMs for criminal attack vector reconnaissance automaton experiments using Moondream (as usual) based image-text-text with pre defined text prompts that are tuned for extracting weakness or customer identity and monitory based theft physical red team engagement reconnaissance and vector of malicious or criminal activity Working on a space. Thanks again for such a wonderful blessing of super power image-text-to-text model with minimal computational power needed ",
"raw": "After some late night casual hacking about on VLMs for criminal attack vector reconnaissance automaton experiments using Moondream (as usual) based image-text-text with pre defined text prompts that are tuned for extracting weakness or customer identity and monitory based theft physical red team engagement reconnaissance and vector of malicious or criminal activity Working on a space. Thanks again for such a wonderful blessing of super power image-text-to-text model with minimal computational power needed ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@vikhyatk",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "vikhyatk",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I have started actually implementing a custom little tool with both static html space sand python gradio spaces on the go which I shall share as hf spaces when done them. ",
"raw": "I have started actually implementing a custom little tool with both static html space sand python gradio spaces on the go which I shall share as hf spaces when done them. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "---",
"raw": "---",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/vikhyatk/moondream2",
"href": null,
"resource": {
"type": "space",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/vikhyatk/moondream2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/vikhyatk/moondream2",
"href": null,
"resource": {
"type": "model",
"id": "vikhyatk/moondream2",
"discussionNum": null
},
"url": "https://huggingface.co/vikhyatk/moondream2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | # Offensive Physical Security Reconnaissance Planning Automation with public facing RTSP streams and Moondream
After some late night casual hacking about on VLMs for criminal attack vector reconnaissance automaton experiments using Moondream (as usual) based image-text-text with pre defined text prompts that are tuned for extracting weakness or customer identity and monitory based theft physical red team engagement reconnaissance and vector of malicious or criminal activity Working on a space. Thanks again for such a wonderful blessing of super power image-text-to-text model with minimal computational power needed @vikhyatk
I have started actually implementing a custom little tool with both static html space sand python gradio spaces on the go which I shall share as hf spaces when done them.
---
https://huggingface.co/spaces/vikhyatk/moondream2
https://huggingface.co/vikhyatk/moondream2
| {
"avatarUrl": "/avatars/b2725bb163fa15d6c5856121780d52eb.svg",
"fullname": "Ci Splunk",
"name": "Csplk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 43,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/DdWVU3zoF4_S0S_hFiUAf.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/jNoxcR5WYBCcPi86fE84N.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/YymDlsA0T35E7GiugNvF1.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/sTQJdj5zM-vTal2XxMt3v.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/hN9BMuviWNRxFtHYcpSND.jpeg"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/62d93a2b28f9c86a40314043/rTGnLgcfvdhxiLjAcnVc8.jpeg"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/63117568fa95534e218da163/8h9zN8aKRxPLBnXW7sqY9.jpeg",
"fullname": "Vik Korrapati",
"name": "vikhyatk",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 375
}
] | [
{
"reaction": "🤯",
"users": [
"vikhyatk",
"tolgadev",
"mysticaltech"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"dzakwan",
"lionhsu"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"prithivMLmods",
"AARon99"
],
"count": 2
}
] | 2024-07-18T11:18:57.000Z | 2024-07-18T18:37:31.443Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6045600bb79a75142576efa7/0uL6u9S-mu1eRxDOr9q0u.png",
"fullname": "Tolga",
"name": "tolgadev",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 24,
"isFollowing": false
}
] | /posts/Csplk/926337297827024 | 1,379 | 1 |
201610890332989 | [
{
"type": "text",
"value": "Excited to share my \"Focus Mode\" Playlist, code name \"The AI/ML Researcher's Playlist\" :)",
"raw": "Excited to share my \"Focus Mode\" Playlist, code name \"The AI/ML Researcher's Playlist\" :)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "No lyrics, no beat, just a harmonious sequence of piano melody that will take you places beyond your reasoning/thinking prowess, trust me I’ve been there lol 🎹🎶",
"raw": "No lyrics, no beat, just a harmonious sequence of piano melody that will take you places beyond your reasoning/thinking prowess, trust me I’ve been there lol 🎹🎶",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thanks to an amazing pianist and composer on instagram @andreavanzo_composer who played all the songs in this playlist. ",
"raw": "Thanks to an amazing pianist and composer on instagram @andreavanzo_composer who played all the songs in this playlist. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Currently have a total of 16 songs, I will keep adding more when I find them.",
"raw": "Currently have a total of 16 songs, I will keep adding more when I find them.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Full playlist: ",
"raw": "Full playlist: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://youtu.be/2ccxanKmzZY?si=x6weX2AgY5Zpadfw",
"href": "https://youtu.be/2ccxanKmzZY?si=x6weX2AgY5Zpadfw",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Excited to share my "Focus Mode" Playlist, code name "The AI/ML Researcher's Playlist" :)
No lyrics, no beat, just a harmonious sequence of piano melody that will take you places beyond your reasoning/thinking prowess, trust me I’ve been there lol 🎹🎶
Thanks to an amazing pianist and composer on instagram @andreavanzo_composer who played all the songs in this playlist.
Currently have a total of 16 songs, I will keep adding more when I find them.
Full playlist: https://youtu.be/2ccxanKmzZY?si=x6weX2AgY5Zpadfw | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/Xmb5DMe7LM9JvEsGaVyCB.mp4"
}
] | [] | [
{
"reaction": "❤️",
"users": [
"sammy4xrist",
"Rusvasul",
"darkzbaron",
"asigalov61"
],
"count": 4
}
] | 2024-07-18T02:04:53.000Z | 2024-07-20T02:24:22.121Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5f57ea2d3f32f12a3c0692e6/b-9GG2p--smCameUPeCBN.jpeg",
"fullname": "Alex",
"name": "asigalov61",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 65,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64e8ea3892d9db9a93580fe3/NdkdkQ0VPNp5Ka5x9ASlQ.jpeg",
"fullname": "Parag Ekbote",
"name": "AINovice2005",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 2,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/sKnJAwv69FlypZjoxnQnh.png",
"fullname": "Miracle Samuel Igwe",
"name": "sammy4xrist",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64bbbacd76a6e2efcc7b6759/5izQItlixMacJ5SETWMrN.png",
"fullname": "Raj Hada",
"name": "xriminact",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 5,
"isFollowing": false
}
] | /posts/Jaward/201610890332989 | 1,238 | 7 |
955430113136205 | [
{
"type": "text",
"value": "GraphRAG-Ollama-UI",
"raw": "GraphRAG-Ollama-UI",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I've been working on a local version of Microsoft's GraphRAG that uses Ollama for everything. It's got a new interactive UI built with Gradio that makes it easier to manage data, run queries, and visualize results. It's not fully featured or set up to harness the entire GraphRAG library yet but it allows you to run all the standard commands for Indexing/Processing and chatting with your graph. Some key features:",
"raw": "I've been working on a local version of Microsoft's GraphRAG that uses Ollama for everything. It's got a new interactive UI built with Gradio that makes it easier to manage data, run queries, and visualize results. It's not fully featured or set up to harness the entire GraphRAG library yet but it allows you to run all the standard commands for Indexing/Processing and chatting with your graph. Some key features:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Uses local models via Ollama for LLM and embeddings",
"raw": "Uses local models via Ollama for LLM and embeddings",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "3D graph visualization of the knowledge graph using Plotly",
"raw": "3D graph visualization of the knowledge graph using Plotly",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "File management through the UI (upload, view, edit, delete)",
"raw": "File management through the UI (upload, view, edit, delete)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Settings management in the interface",
"raw": "Settings management in the interface",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Real-time logging for debugging",
"raw": "Real-time logging for debugging",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/severian42/GraphRAG-Ollama-UI",
"href": "https://github.com/severian42/GraphRAG-Ollama-UI",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | GraphRAG-Ollama-UI
I've been working on a local version of Microsoft's GraphRAG that uses Ollama for everything. It's got a new interactive UI built with Gradio that makes it easier to manage data, run queries, and visualize results. It's not fully featured or set up to harness the entire GraphRAG library yet but it allows you to run all the standard commands for Indexing/Processing and chatting with your graph. Some key features:
Uses local models via Ollama for LLM and embeddings
3D graph visualization of the knowledge graph using Plotly
File management through the UI (upload, view, edit, delete)
Settings management in the interface
Real-time logging for debugging
https://github.com/severian42/GraphRAG-Ollama-UI | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64740cf7485a7c8e1bd51ac9/CXZCJm2x4ToT83pEIYyQR.png",
"fullname": "Beckett Dillon",
"name": "Severian",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 175,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🚀",
"users": [
"lucianosb",
"Saugatkafley",
"200-cooller",
"hubhamgupta",
"AlbertSadday2891",
"Ruby-NewLeaf",
"taufiqdp",
"victor",
"den0620"
],
"count": 9
},
{
"reaction": "🔥",
"users": [
"Jaward",
"Sergidev",
"Mixa"
],
"count": 3
},
{
"reaction": "🤝",
"users": [
"chuangxinlezhi"
],
"count": 1
},
{
"reaction": "🤗",
"users": [
"Saugatkafley"
],
"count": 1
}
] | 2024-07-17T17:46:09.000Z | 2024-07-18T05:28:18.167Z | [
{
"avatarUrl": "/avatars/a991140458c7a54efd50228b761034f7.svg",
"fullname": "Shubham Gupta",
"name": "hubhamgupta",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/Severian/955430113136205 | 3,257 | 1 |
800610384384427 | [
{
"type": "text",
"value": "#ICLM 2024 is almost there 🔥🔥🔥 PM if you will be in Vienna next week, Glad to catchup with the Hugging Face community there!",
"raw": "#ICLM 2024 is almost there 🔥🔥🔥 PM if you will be in Vienna next week, Glad to catchup with the Hugging Face community there!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "I would like to contribute 🎁 by releasing the sixth Knowledge Vault, with 100 lectures visualized from the last 10 years of ICML from 2014 to 2024, (10 from 2024 will be included after the conference) including knowledge graphs for all the Invited Lectures and some extras, with almost 3000 topics represented using AI.",
"raw": "I would like to contribute 🎁 by releasing the sixth Knowledge Vault, with 100 lectures visualized from the last 10 years of ICML from 2014 to 2024, (10 from 2024 will be included after the conference) including knowledge graphs for all the Invited Lectures and some extras, with almost 3000 topics represented using AI.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "You can explore it here:",
"raw": "You can explore it here:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏 ",
"raw": "🌏 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://theendofknowledge.com/Vaults/6/ICML-2015-2024.html",
"href": "https://theendofknowledge.com/Vaults/6/ICML-2015-2024.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "And you can learn more about the Vaults here:",
"raw": "And you can learn more about the Vaults here:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📝https://www.linkedin.com/pulse/knowledge-vaults-david-vivancos-lbjef/",
"raw": "📝https://www.linkedin.com/pulse/knowledge-vaults-david-vivancos-lbjef/",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "And previous Vaults relevant to the #huggingface community are:",
"raw": "And previous Vaults relevant to the #huggingface community are:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏 [ ",
"raw": "🌏 [ ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@lexfridman",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "lexfridman",
"label": null,
"lang": null
},
{
"type": "text",
"value": " 2018-2024 Interviews] ",
"raw": " 2018-2024 Interviews] ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://theendofknowledge.com/Vaults/1/Lex100-2024.html",
"href": "https://theendofknowledge.com/Vaults/1/Lex100-2024.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏 [ICLR 2014-2023] ",
"raw": "🌏 [ICLR 2014-2023] ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://theendofknowledge.com/Vaults/2/ICLR2014-2023.html",
"href": "https://theendofknowledge.com/Vaults/2/ICLR2014-2023.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏 [AIForGood 2017-2024] ",
"raw": "🌏 [AIForGood 2017-2024] ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://theendofknowledge.com/Vaults/4/AIForGood2017-2024.html",
"href": "https://theendofknowledge.com/Vaults/4/AIForGood2017-2024.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌏 [CVPR 2015-2024] ",
"raw": "🌏 [CVPR 2015-2024] ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://theendofknowledge.com/Vaults/5/CVPR-2015-2024.html",
"href": "https://theendofknowledge.com/Vaults/5/CVPR-2015-2024.html",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Hope you like them!",
"raw": "Hope you like them!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "And great to see you all at #icml2024 ",
"raw": "And great to see you all at #icml2024 ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@clem",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "clem",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@thomwolf",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "thomwolf",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@julien-c",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "julien-c",
"label": null,
"lang": null
},
{
"type": "text",
"value": " and team",
"raw": " and team",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | #ICLM 2024 is almost there 🔥🔥🔥 PM if you will be in Vienna next week, Glad to catchup with the Hugging Face community there!
I would like to contribute 🎁 by releasing the sixth Knowledge Vault, with 100 lectures visualized from the last 10 years of ICML from 2014 to 2024, (10 from 2024 will be included after the conference) including knowledge graphs for all the Invited Lectures and some extras, with almost 3000 topics represented using AI.
You can explore it here:
🌏 https://theendofknowledge.com/Vaults/6/ICML-2015-2024.html
And you can learn more about the Vaults here:
📝https://www.linkedin.com/pulse/knowledge-vaults-david-vivancos-lbjef/
And previous Vaults relevant to the #huggingface community are:
🌏 [ @lexfridman 2018-2024 Interviews] https://theendofknowledge.com/Vaults/1/Lex100-2024.html
🌏 [ICLR 2014-2023] https://theendofknowledge.com/Vaults/2/ICLR2014-2023.html
🌏 [AIForGood 2017-2024] https://theendofknowledge.com/Vaults/4/AIForGood2017-2024.html
🌏 [CVPR 2015-2024] https://theendofknowledge.com/Vaults/5/CVPR-2015-2024.html
Hope you like them!
And great to see you all at #icml2024 @clem @thomwolf @julien-c and team
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1671537650254-noauth.jpeg",
"fullname": "David Vivancos",
"name": "DavidVivancos",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 27,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/63a1a41be36f2e4d5b09f187/2XpItxQZVU81pxxCte5lo.mp4"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857146757-5e67bdd61009063689407479.jpeg",
"fullname": "Clem 🤗",
"name": "clem",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1763
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5dd96eb166059660ed1ee413/NQtzmrDdbG0H8qkZvRyGk.jpeg",
"fullname": "Julien Chaumond",
"name": "julien-c",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 1580
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/MohIfSDfgU9sutuPV5j37.jpeg",
"fullname": "Lex Fridman",
"name": "lexfridman",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 1
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1583857746553-5df7e9e5da6d0311fd3d53f9.jpeg",
"fullname": "Thomas Wolf",
"name": "thomwolf",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 704
}
] | [
{
"reaction": "❤️",
"users": [
"clem",
"victor",
"Rusvasul",
"osanseviero",
"louisbrulenaudet",
"julien-c"
],
"count": 6
},
{
"reaction": "🤗",
"users": [
"200-cooller",
"clem",
"victor",
"julien-c"
],
"count": 4
},
{
"reaction": "👍",
"users": [
"Norod78",
"victor",
"julien-c"
],
"count": 3
}
] | 2024-07-17T15:20:02.000Z | 2024-07-17T15:20:02.294Z | [] | /posts/DavidVivancos/800610384384427 | 1,676 | 0 |
959637893605388 | [
{
"type": "text",
"value": "Using the new viewer iframe support for the datasets viewer, I built a simple Space ",
"raw": "Using the new viewer iframe support for the datasets viewer, I built a simple Space ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/davanstrien/collection_dataset_viewer",
"href": null,
"resource": {
"type": "space",
"id": "davanstrien/collection_dataset_viewer",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/davanstrien/collection_dataset_viewer",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " to quickly explore all the datasets inside a collection. ",
"raw": " to quickly explore all the datasets inside a collection. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "The collection is loaded from an environment variable, so you can duplicate this Space to create a Space for exploring datasets in another collection! ",
"raw": "The collection is loaded from an environment variable, so you can duplicate this Space to create a Space for exploring datasets in another collection! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Using the new viewer iframe support for the datasets viewer, I built a simple Space https://huggingface.co/spaces/davanstrien/collection_dataset_viewer to quickly explore all the datasets inside a collection.
The collection is loaded from an environment variable, so you can duplicate this Space to create a Space for exploring datasets in another collection! | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1627505688463-60107b385ac3e86b3ea4fc34.jpeg",
"fullname": "Daniel van Strien",
"name": "davanstrien",
"type": "user",
"isPro": true,
"isHf": true,
"isMod": false,
"followerCount": 410,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/60107b385ac3e86b3ea4fc34/b1Er0ueOJcQKZOT6qH5zG.mp4"
}
] | [] | [
{
"reaction": "👀",
"users": [
"Alexandro14",
"yjernite",
"John6666",
"lucianosb",
"Ramikan-BR",
"Nymbo",
"osanseviero"
],
"count": 7
}
] | 2024-07-17T14:37:35.000Z | 2024-07-17T14:37:35.190Z | [] | /posts/davanstrien/959637893605388 | 1,685 | 0 |
665227462229685 | [
{
"type": "text",
"value": "palmer-004 becomes 🔥turbo🔥 now is half the size, twice the speed and the best overall 0.5b language model in huggingface.",
"raw": "palmer-004 becomes 🔥turbo🔥 now is half the size, twice the speed and the best overall 0.5b language model in huggingface.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/appvoid/palmer-004-turbo",
"href": null,
"resource": {
"type": "model",
"id": "appvoid/palmer-004-turbo",
"discussionNum": null
},
"url": "https://huggingface.co/appvoid/palmer-004-turbo",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | palmer-004 becomes 🔥turbo🔥 now is half the size, twice the speed and the best overall 0.5b language model in huggingface.
https://huggingface.co/appvoid/palmer-004-turbo | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a813dedbb9e28866a91b27/zs-RWFuXs17IfPUhxQaei.jpeg",
"fullname": "appvoid",
"name": "appvoid",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 35,
"isFollowing": false
} | [] | [] | [
{
"reaction": "❤️",
"users": [
"ZeroWw",
"lucianosb"
],
"count": 2
},
{
"reaction": "🚀",
"users": [
"Alexandro14"
],
"count": 1
},
{
"reaction": "🔥",
"users": [
"Alexandro14"
],
"count": 1
}
] | 2024-07-17T12:15:08.000Z | 2024-07-17T19:30:38.435Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
}
] | /posts/appvoid/665227462229685 | 1,499 | 1 |
547009284011325 | [
{
"type": "text",
"value": "🤯 Ghost 8B Beta emerges as a clear leader, surpassing even proprietary models like xAI Grok 1, OpenAI GPT 3.5, and Mistral Mixtral 8x7B. This dominance extends to its parity with Mistral Medium, further solidifying its position as a top-tier language model. Furthermore, Ghost 8B Beta stands out as one of only three models employing the zero-shot method for evaluation, alongside Claude 2 and Claude 3, showcasing its unique capabilities and potential for groundbreaking applications. ",
"raw": "🤯 Ghost 8B Beta emerges as a clear leader, surpassing even proprietary models like xAI Grok 1, OpenAI GPT 3.5, and Mistral Mixtral 8x7B. This dominance extends to its parity with Mistral Medium, further solidifying its position as a top-tier language model. Furthermore, Ghost 8B Beta stands out as one of only three models employing the zero-shot method for evaluation, alongside Claude 2 and Claude 3, showcasing its unique capabilities and potential for groundbreaking applications. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "--- ",
"raw": "--- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "💬 Chat with the model here: ",
"raw": "💬 Chat with the model here: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Playground with Ghost 8B Beta (β, 8k): ",
"raw": "- Playground with Ghost 8B Beta (β, 8k): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k",
"href": null,
"resource": {
"type": "space",
"id": "lamhieu/ghost-8b-beta-8k",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Playground with Ghost 8B Beta (β, 128k): ",
"raw": "- Playground with Ghost 8B Beta (β, 128k): ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/lamhieu/ghost-8b-beta-128k",
"href": null,
"resource": {
"type": "space",
"id": "lamhieu/ghost-8b-beta-128k",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/lamhieu/ghost-8b-beta-128k",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Official website: ",
"raw": "- Official website: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://ghost-x.org/docs/models/ghost-8b-beta/",
"href": "https://ghost-x.org/docs/models/ghost-8b-beta/",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | 🤯 Ghost 8B Beta emerges as a clear leader, surpassing even proprietary models like xAI Grok 1, OpenAI GPT 3.5, and Mistral Mixtral 8x7B. This dominance extends to its parity with Mistral Medium, further solidifying its position as a top-tier language model. Furthermore, Ghost 8B Beta stands out as one of only three models employing the zero-shot method for evaluation, alongside Claude 2 and Claude 3, showcasing its unique capabilities and potential for groundbreaking applications.
---
💬 Chat with the model here:
- Playground with Ghost 8B Beta (β, 8k): https://huggingface.co/spaces/lamhieu/ghost-8b-beta-8k
- Playground with Ghost 8B Beta (β, 128k): https://huggingface.co/spaces/lamhieu/ghost-8b-beta-128k
- Official website: https://ghost-x.org/docs/models/ghost-8b-beta/ | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/600ae38cc92b79f54efd4556/cSqRIslYl5L3I4WK3a31f.png",
"fullname": "Hieu Lam",
"name": "lamhieu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 74,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/600ae38cc92b79f54efd4556/FVvYQgJDlt3JOS9SRHkDL.png"
}
] | [] | [
{
"reaction": "👍",
"users": [
"ZeroWw",
"abidlabs",
"tuanio",
"reach-vb",
"Dihelson",
"Alexandro14",
"amosgyamfi",
"cnmoro",
"nicolollo",
"GPT007",
"louisbrulenaudet"
],
"count": 11
},
{
"reaction": "👀",
"users": [
"Dihelson",
"DIvAndrey"
],
"count": 2
},
{
"reaction": "❤️",
"users": [
"Dihelson"
],
"count": 1
}
] | 2024-07-17T08:18:35.000Z | 2024-07-22T17:44:48.436Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/600ae38cc92b79f54efd4556/cSqRIslYl5L3I4WK3a31f.png",
"fullname": "Hieu Lam",
"name": "lamhieu",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 74,
"isFollowing": false
}
] | /posts/lamhieu/547009284011325 | 2,118 | 2 |
787664814277596 | [
{
"type": "text",
"value": "No on really pays attention to voyage ai , but probably you should 😉",
"raw": "No on really pays attention to voyage ai , but probably you should 😉",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/voyageai",
"href": "https://huggingface.co/voyageai",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | No on really pays attention to voyage ai , but probably you should 😉
https://huggingface.co/voyageai | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/62a3bb1cd0d8c2c2169f0b88/eT2TS0IlQbZtz-F_zHLz9.jpeg",
"fullname": "Joseph [open/acc] Pollack",
"name": "Tonic",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 313,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"lysandre",
"Joseph717171",
"Alexandro14",
"takarajordan",
"jungnerd"
],
"count": 5
}
] | 2024-07-17T07:11:11.000Z | 2024-07-19T01:51:45.834Z | [
{
"avatarUrl": "/avatars/54483699273ac58a4a6fe1fa4aab65fe.svg",
"fullname": "Robert Sinclair",
"name": "ZeroWw",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 76,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/no-auth/KHoPnJx0hZiXI9kjmcOff.png",
"fullname": "NGOZIKA SANDRA EZEORA",
"name": "zikazach",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "/avatars/867367fd0c1978d370dc8251deca2586.svg",
"fullname": "Harsha ",
"name": "frewtloops",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/noauth/gDimV78Uj2qUboq-j4DHX.jpeg",
"fullname": "dsc",
"name": "aliomaxen76",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/Tonic/787664814277596 | 1,612 | 4 |
434296828609012 | [
{
"type": "text",
"value": "Decensored Gemma2-27b ",
"raw": "Decensored Gemma2-27b ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/TheDrummer/Big-Tiger-Gemma-27B-v1",
"href": null,
"resource": {
"type": "model",
"id": "TheDrummer/Big-Tiger-Gemma-27B-v1",
"discussionNum": null
},
"url": "https://huggingface.co/TheDrummer/Big-Tiger-Gemma-27B-v1",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Decensored Gemma2-27b
https://huggingface.co/TheDrummer/Big-Tiger-Gemma-27B-v1 | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1669551186189-63732ebbbd81fae2b3aaf3fb.jpeg",
"fullname": "Knut Jägersberg",
"name": "KnutJaegersberg",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 238,
"isFollowing": false
} | [] | [] | [
{
"reaction": "👀",
"users": [
"reach-vb",
"Alexandro14",
"John6666"
],
"count": 3
},
{
"reaction": "🤗",
"users": [
"qiqiWav"
],
"count": 1
}
] | 2024-07-17T04:54:23.000Z | 2024-07-17T04:54:23.973Z | [] | /posts/KnutJaegersberg/434296828609012 | 1,369 | 0 |
793020227673362 | [
{
"type": "text",
"value": "Micrograd in pure C🤕",
"raw": "Micrograd in pure C🤕",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Port of Karpathy's micrograd in pure C. ",
"raw": "Port of Karpathy's micrograd in pure C. ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Yo C does not negotiate with memory 😂",
"raw": "Yo C does not negotiate with memory 😂",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Code: ",
"raw": "Code: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/Jaykef/micrograd.c",
"href": "https://github.com/Jaykef/micrograd.c",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Micrograd in pure C🤕
Port of Karpathy's micrograd in pure C.
Yo C does not negotiate with memory 😂
Code: https://github.com/Jaykef/micrograd.c | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
} | [
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/roWmWlE3SZrhoRKjw6AdZ.qt"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/NqUAnRBJGnlMdxN0fCsJf.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/6438a9027de34e8ea7e4b257/yZl7MUAK6YQrLyma2IMuk.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"reach-vb",
"jeffboudier",
"abidlabs",
"nbroad",
"not-lain",
"den0620",
"DIvAndrey"
],
"count": 7
},
{
"reaction": "😎",
"users": [
"sulpha",
"GPT007"
],
"count": 2
}
] | 2024-07-17T00:34:20.000Z | 2024-07-19T16:42:52.311Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6438a9027de34e8ea7e4b257/vib8QSd1AWMr_bR9ig_xJ.jpeg",
"fullname": "Jaward Sesay",
"name": "Jaward",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 191,
"isFollowing": false
},
{
"avatarUrl": "/avatars/744eddaa7dfc34a57df9ce32a78059a0.svg",
"fullname": "Tyrone Pierce",
"name": "piercyy",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 3,
"isFollowing": false
}
] | /posts/Jaward/793020227673362 | 1,651 | 2 |
122117725261447 | [
{
"type": "text",
"value": "I've made a creative version of Tile Upscaler",
"raw": "I've made a creative version of Tile Upscaler",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/gokaygokay/TileUpscalerV2",
"href": null,
"resource": {
"type": "space",
"id": "gokaygokay/TileUpscalerV2",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/gokaygokay/TileUpscalerV2",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://github.com/gokayfem/Tile-Upscaler",
"href": "https://github.com/gokayfem/Tile-Upscaler",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- New tiling strategy",
"raw": "- New tiling strategy",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Now it's closer to Clarity Upscaler",
"raw": "- Now it's closer to Clarity Upscaler",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- It has more parameters to play and it has more room to fail because of that",
"raw": "- It has more parameters to play and it has more room to fail because of that",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- You should try different resolutions, strength and controlnet strength",
"raw": "- You should try different resolutions, strength and controlnet strength",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Original Tile Upscaler",
"raw": "Original Tile Upscaler",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/gokaygokay/Tile-Upscaler",
"href": null,
"resource": {
"type": "space",
"id": "gokaygokay/Tile-Upscaler",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/gokaygokay/Tile-Upscaler",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | I've made a creative version of Tile Upscaler
- https://huggingface.co/spaces/gokaygokay/TileUpscalerV2
- https://github.com/gokayfem/Tile-Upscaler
- New tiling strategy
- Now it's closer to Clarity Upscaler
- It has more parameters to play and it has more room to fail because of that
- You should try different resolutions, strength and controlnet strength
Original Tile Upscaler
- https://huggingface.co/spaces/gokaygokay/Tile-Upscaler
| {
"avatarUrl": "/avatars/b9a6d8e11ec7a62ca2b819e0b6c37222.svg",
"fullname": "gokay aydogan",
"name": "gokaygokay",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 1130,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/630899601dd1e3075d975785/3ffQzKAqBSHZh-wej9kgW.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"reach-vb",
"osanseviero",
"jeffboudier",
"abidlabs",
"Blane187",
"louisbrulenaudet",
"Alexandro14",
"John6666",
"HaiQinQin",
"Wok",
"Itsjustkev",
"saxash",
"jgitsolutions"
],
"count": 13
},
{
"reaction": "👍",
"users": [
"John6666",
"reach-vb",
"osanseviero",
"abidlabs",
"HaiQinQin",
"kramp",
"mah002",
"Wok"
],
"count": 8
}
] | 2024-07-16T22:56:48.000Z | 2024-07-17T18:35:48.062Z | [] | /posts/gokaygokay/122117725261447 | 4,636 | 0 |
328638870427201 | [
{
"type": "text",
"value": "What an eventful day in Open Source LLMs today:",
"raw": "What an eventful day in Open Source LLMs today:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mistral released Codestral Mamba 🐍",
"raw": "Mistral released Codestral Mamba 🐍",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Beats DeepSeek QwenCode, best model < 10B, competitive with Codestral 22B",
"raw": "> Beats DeepSeek QwenCode, best model < 10B, competitive with Codestral 22B",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Mamba 2 architecture - supports up to 256K context",
"raw": "> Mamba 2 architecture - supports up to 256K context",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Apache 2.0 licensed, perfect for local code assistant",
"raw": "> Apache 2.0 licensed, perfect for local code assistant",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Transformers & llama.cpp integration upcoming!",
"raw": "> Transformers & llama.cpp integration upcoming!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model checkpoint: ",
"raw": "Model checkpoint: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/mistralai/mamba-codestral-7B-v0.1",
"href": "https://huggingface.co/mistralai/mamba-codestral-7B-v0.1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Hugging Face dropped SmolLM 🤏",
"raw": "Hugging Face dropped SmolLM 🤏",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Beats MobileLLM, Qwen 0.5B, Phi 1.5B and more!",
"raw": "> Beats MobileLLM, Qwen 0.5B, Phi 1.5B and more!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> 135M, 360M, and 1.7B param model checkpoints",
"raw": "> 135M, 360M, and 1.7B param model checkpoints",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Trained on 600B high-quality synthetic + FineWeb Edu tokens",
"raw": "> Trained on 600B high-quality synthetic + FineWeb Edu tokens",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Architecture: Llama + GQA + 2048 ctx length ",
"raw": "> Architecture: Llama + GQA + 2048 ctx length ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Ripe for fine-tuning and on-device deployments.",
"raw": "> Ripe for fine-tuning and on-device deployments.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Works out of the box with Transformers!",
"raw": "> Works out of the box with Transformers!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model checkpoints: ",
"raw": "Model checkpoints: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966",
"href": null,
"resource": {
"type": "collection",
"id": "HuggingFaceTB/smollm-6695016cad7167254ce15966",
"discussionNum": null
},
"url": "https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Mistral released Mathstral 7B ∑",
"raw": "Mistral released Mathstral 7B ∑",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> 56.6% on MATH and 63.47% on MMLU",
"raw": "> 56.6% on MATH and 63.47% on MMLU",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Same architecture as Mistral 7B",
"raw": "> Same architecture as Mistral 7B",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Works out of the box with Transformers & llama.cpp ",
"raw": "> Works out of the box with Transformers & llama.cpp ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "> Released under Apache 2.0 license",
"raw": "> Released under Apache 2.0 license",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Model checkpoint: ",
"raw": "Model checkpoint: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/mistralai/mathstral-7B-v0.1",
"href": "https://huggingface.co/mistralai/mathstral-7B-v0.1",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Pretty dope day for open source ML. Can't wait to see what the community builds with it and to support them further! 🤗",
"raw": "Pretty dope day for open source ML. Can't wait to see what the community builds with it and to support them further! 🤗",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "What's your favourite from the release today?",
"raw": "What's your favourite from the release today?",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | What an eventful day in Open Source LLMs today:
Mistral released Codestral Mamba 🐍
> Beats DeepSeek QwenCode, best model < 10B, competitive with Codestral 22B
> Mamba 2 architecture - supports up to 256K context
> Apache 2.0 licensed, perfect for local code assistant
> Transformers & llama.cpp integration upcoming!
Model checkpoint: https://huggingface.co/mistralai/mamba-codestral-7B-v0.1
Hugging Face dropped SmolLM 🤏
> Beats MobileLLM, Qwen 0.5B, Phi 1.5B and more!
> 135M, 360M, and 1.7B param model checkpoints
> Trained on 600B high-quality synthetic + FineWeb Edu tokens
> Architecture: Llama + GQA + 2048 ctx length
> Ripe for fine-tuning and on-device deployments.
> Works out of the box with Transformers!
Model checkpoints: https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966
Mistral released Mathstral 7B ∑
> 56.6% on MATH and 63.47% on MMLU
> Same architecture as Mistral 7B
> Works out of the box with Transformers & llama.cpp
> Released under Apache 2.0 license
Model checkpoint: https://huggingface.co/mistralai/mathstral-7B-v0.1
Pretty dope day for open source ML. Can't wait to see what the community builds with it and to support them further! 🤗
What's your favourite from the release today? | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1655385361868-61b85ce86eb1f2c5e6233736.jpeg",
"fullname": "Vaibhav Srivastav",
"name": "reach-vb",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 460,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"reach-vb",
"Presidentlin",
"osanseviero",
"dvilasuero",
"louisbrulenaudet",
"nawed",
"JackCloudman",
"ajibawa-2023",
"Alexandro14",
"tharrmeehan",
"Mdubbya",
"lucianosb",
"WOOSAH",
"nbroad"
],
"count": 14
},
{
"reaction": "🤝",
"users": [
"reach-vb",
"dvilasuero",
"louisbrulenaudet"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"reach-vb",
"dvilasuero",
"jeffboudier"
],
"count": 3
},
{
"reaction": "👍",
"users": [
"dashfunnydashdash"
],
"count": 1
}
] | 2024-07-16T22:36:54.000Z | 2024-07-17T11:21:21.048Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/64aea8ff67511bd3d965697b/Jxn52EmDF5RApJh8antxn.jpeg",
"fullname": "Feynman Innovations",
"name": "ajibawa-2023",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 138,
"isFollowing": false
}
] | /posts/reach-vb/328638870427201 | 3,346 | 1 |
875200491029475 | [
{
"type": "text",
"value": "What could have been",
"raw": "What could have been",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "\"{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>\\n' + message['content'] + '<|end|>\\n' }}{% elif message['role'] == 'assistant' %}{{ '<|bot|>\\n' + message['content'] }}{% if loop.last %}{{ '<|end|>' + eos_token }}{% else %}{{ '<|end|>\\n' }}{% endif %}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}\"",
"raw": "\"{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>\\n' + message['content'] + '<|end|>\\n' }}{% elif message['role'] == 'assistant' %}{{ '<|bot|>\\n' + message['content'] }}{% if loop.last %}{{ '<|end|>' + eos_token }}{% else %}{{ '<|end|>\\n' }}{% endif %}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | What could have been
"{{bos_token}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + message['content'] + '<|end|>\n' }}{% elif message['role'] == 'assistant' %}{{ '<|bot|>\n' + message['content'] }}{% if loop.last %}{{ '<|end|>' + eos_token }}{% else %}{{ '<|end|>\n' }}{% endif %}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}" | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/659f000b83abded48e190901/BnXL_XYbVX6PHngfQLECW.png",
"fullname": "Noa Roggendorff",
"name": "nroggendorff",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 141,
"isFollowing": false
} | [] | [] | [] | 2024-07-16T21:25:33.000Z | 2024-07-16T22:13:36.000Z | [
{
"avatarUrl": "/avatars/57b12d9d45f9ab823ecaef4dbcb29f7c.svg",
"fullname": "Pan Qa",
"name": "panqa108",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/nroggendorff/875200491029475 | 552 | 1 |
687758478477767 | [
{
"type": "text",
"value": "Small models, BIG impact: SmolLM is here! 🚀🔬",
"raw": "Small models, BIG impact: SmolLM is here! 🚀🔬",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "We're launching a series of small but mighty language models:",
"raw": "We're launching a series of small but mighty language models:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏎️ Super fast - runs on laptops, phones, you name it!",
"raw": "🏎️ Super fast - runs on laptops, phones, you name it!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "📏 3 sizes: 130M, 350M, and 1.5B parameters",
"raw": "📏 3 sizes: 130M, 350M, and 1.5B parameters",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🥇 Outperforms same size models from Meta, Microsoft, and Qwen",
"raw": "🥇 Outperforms same size models from Meta, Microsoft, and Qwen",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🔓 Fully open-source: datasets, training code, models",
"raw": "🔓 Fully open-source: datasets, training code, models",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "𝐊𝐞𝐲 𝐟𝐞𝐚𝐭𝐮𝐫𝐞𝐬",
"raw": "𝐊𝐞𝐲 𝐟𝐞𝐚𝐭𝐮𝐫𝐞𝐬",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Trained on FineWeb-Edu and Cosmopedia v2 (largest synthetic pre-training dataset)",
"raw": "- Trained on FineWeb-Edu and Cosmopedia v2 (largest synthetic pre-training dataset)",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- No cloud needed - run locally for privacy and energy efficiency",
"raw": "- No cloud needed - run locally for privacy and energy efficiency",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Everything is public, from data curation to training steps",
"raw": "- Everything is public, from data curation to training steps",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "𝐏𝐨𝐭𝐞𝐧𝐭𝐢𝐚𝐥 𝐮𝐬𝐞 𝐜𝐚𝐬𝐞𝐬",
"raw": "𝐏𝐨𝐭𝐞𝐧𝐭𝐢𝐚𝐥 𝐮𝐬𝐞 𝐜𝐚𝐬𝐞𝐬",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- On-device autocomplete",
"raw": "- On-device autocomplete",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Local request parsing",
"raw": "- Local request parsing",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- Custom fine-tuning for specific needs without the need for expensive GPUs",
"raw": "- Custom fine-tuning for specific needs without the need for expensive GPUs",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "𝐆𝐨 𝐝𝐞𝐞𝐩𝐞𝐫",
"raw": "𝐆𝐨 𝐝𝐞𝐞𝐩𝐞𝐫",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 Check it out: ",
"raw": "👉 Check it out: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/collections/HuggingFaceTB/smollm-models-6695016cad7167254ce15966",
"href": "https://huggingface.co/collections/HuggingFaceTB/smollm-models-6695016cad7167254ce15966",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 Run the 360M model in your browser, 100 % private: ",
"raw": "👉 Run the 360M model in your browser, 100 % private: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/HuggingFaceTB/SmolLM-360M-Instruct-WebGPU",
"href": null,
"resource": {
"type": "space",
"id": "HuggingFaceTB/SmolLM-360M-Instruct-WebGPU",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/HuggingFaceTB/SmolLM-360M-Instruct-WebGPU",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉 Read the blog explaining everything in detail: huggingface.co/blog/smollm",
"raw": "👉 Read the blog explaining everything in detail: huggingface.co/blog/smollm",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Kudos to the stellar team who worked on this project: ",
"raw": "Kudos to the stellar team who worked on this project: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@loubnabnl",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "loubnabnl",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@anton-l",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "anton-l",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@eliebak",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "eliebak",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@lvwerra",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "lvwerra",
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Small models, BIG impact: SmolLM is here! 🚀🔬
We're launching a series of small but mighty language models:
🏎️ Super fast - runs on laptops, phones, you name it!
📏 3 sizes: 130M, 350M, and 1.5B parameters
🥇 Outperforms same size models from Meta, Microsoft, and Qwen
🔓 Fully open-source: datasets, training code, models
𝐊𝐞𝐲 𝐟𝐞𝐚𝐭𝐮𝐫𝐞𝐬
- Trained on FineWeb-Edu and Cosmopedia v2 (largest synthetic pre-training dataset)
- No cloud needed - run locally for privacy and energy efficiency
- Everything is public, from data curation to training steps
𝐏𝐨𝐭𝐞𝐧𝐭𝐢𝐚𝐥 𝐮𝐬𝐞 𝐜𝐚𝐬𝐞𝐬
- On-device autocomplete
- Local request parsing
- Custom fine-tuning for specific needs without the need for expensive GPUs
𝐆𝐨 𝐝𝐞𝐞𝐩𝐞𝐫
👉 Check it out: https://huggingface.co/collections/HuggingFaceTB/smollm-models-6695016cad7167254ce15966
👉 Run the 360M model in your browser, 100 % private: https://huggingface.co/spaces/HuggingFaceTB/SmolLM-360M-Instruct-WebGPU
👉 Read the blog explaining everything in detail: huggingface.co/blog/smollm
Kudos to the stellar team who worked on this project: @loubnabnl @anton-l @eliebak @lvwerra | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
} | [] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1613655355830-noauth.png",
"fullname": "Anton Lozhkov",
"name": "anton-l",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 126
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/651e96991b97c9f33d26bde6/-Bqs6qrmz0yCfwtB2e-6q.jpeg",
"fullname": "Elie Bakouch",
"name": "eliebak",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 53
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/61c141342aac764ce1654e43/81AwoT5IQ_Xdw0OVw7TKu.jpeg",
"fullname": "Loubna Ben Allal",
"name": "loubnabnl",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 2334
},
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/5e48005437cb5b49818287a5/4uCXGGui-9QifAT4qelxU.png",
"fullname": "Leandro von Werra",
"name": "lvwerra",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 234
}
] | [
{
"reaction": "🔥",
"users": [
"YaTharThShaRma999",
"sgarbi",
"sivan22",
"mdouglas",
"nroggendorff",
"John6666",
"reach-vb",
"plaguss",
"gabrielmbmb",
"osanseviero",
"jeffboudier",
"AyoubChLin",
"holooo",
"raveninrhythm",
"Ramikan-BR",
"clem",
"yjernite",
"louisbrulenaudet"
],
"count": 18
},
{
"reaction": "🤝",
"users": [
"surfhb",
"reach-vb",
"osanseviero",
"FaultyEntry",
"ucsahin",
"raveninrhythm",
"Ramikan-BR",
"clem",
"yjernite"
],
"count": 9
},
{
"reaction": "🚀",
"users": [
"Ramikan-BR",
"clem",
"yjernite"
],
"count": 3
},
{
"reaction": "❤️",
"users": [
"Ramikan-BR",
"clem",
"yjernite"
],
"count": 3
},
{
"reaction": "👀",
"users": [
"Ramikan-BR",
"clem"
],
"count": 2
},
{
"reaction": "🧠",
"users": [
"Ramikan-BR",
"clem"
],
"count": 2
}
] | 2024-07-16T17:39:37.000Z | 2024-07-16T17:39:37.821Z | [] | /posts/fdaudens/687758478477767 | 3,287 | 0 |
618460361384960 | [
{
"type": "text",
"value": "Cool things this week from ",
"raw": "Cool things this week from ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@huggingface",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "huggingface",
"label": null,
"lang": null
},
{
"type": "text",
"value": "!",
"raw": "!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🌎AI math olympiad winner NuminaMath is here!",
"raw": "🌎AI math olympiad winner NuminaMath is here!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🤗Announcing New Hugging Face and Keras NLP integration",
"raw": "🤗Announcing New Hugging Face and Keras NLP integration",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✨UI overhaul to HF tokens! ",
"raw": "✨UI overhaul to HF tokens! ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🧊 Embed our dataset viewer on any webpage!",
"raw": "🧊 Embed our dataset viewer on any webpage!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/winning-aimo-progress-prize",
"href": "https://huggingface.co/blog/winning-aimo-progress-prize",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/blog/keras-nlp-integration",
"href": "https://huggingface.co/blog/keras-nlp-integration",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/settings/tokens",
"href": "https://huggingface.co/settings/tokens",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://x.com/julien_c/status/1812099420726456457",
"href": "https://x.com/julien_c/status/1812099420726456457",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Check out the full list on our discord! 👇",
"raw": "Check out the full list on our discord! 👇",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://discord.com/invite/JfAtkvEtRb",
"href": "https://discord.com/invite/JfAtkvEtRb",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
}
] | Cool things this week from @huggingface!
🌎AI math olympiad winner NuminaMath is here!
🤗Announcing New Hugging Face and Keras NLP integration
✨UI overhaul to HF tokens!
🧊 Embed our dataset viewer on any webpage!
https://huggingface.co/blog/winning-aimo-progress-prize
https://huggingface.co/blog/keras-nlp-integration
https://huggingface.co/settings/tokens
https://x.com/julien_c/status/1812099420726456457
Check out the full list on our discord! 👇
https://discord.com/invite/JfAtkvEtRb
| {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/6340651b388c3fa40f9a5bc0/av1C4_S7bHGxAzOu8lOmG.jpeg",
"fullname": "Adam Molnar",
"name": "lunarflu",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 333,
"isFollowing": false
} | [] | [] | [
{
"reaction": "🔥",
"users": [
"reach-vb",
"plaguss",
"osanseviero",
"jeffboudier",
"not-lain",
"yjernite",
"louisbrulenaudet",
"Blane187",
"apehex"
],
"count": 9
}
] | 2024-07-16T15:22:26.000Z | 2024-07-16T15:22:26.679Z | [] | /posts/lunarflu/618460361384960 | 1,837 | 0 |
799720029968177 | [
{
"type": "text",
"value": "The community Journalists on HuggingFace recently launched a tool (",
"raw": "The community Journalists on HuggingFace recently launched a tool (",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/JournalistsonHF/text-to-image-bias",
"href": null,
"resource": {
"type": "space",
"id": "JournalistsonHF/text-to-image-bias",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/JournalistsonHF/text-to-image-bias",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ") to compare biases across several text-to-image models. I forked my own to evaluate the SDXL models I made.",
"raw": ") to compare biases across several text-to-image models. I forked my own to evaluate the SDXL models I made.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "## SinteticoXL Bias:",
"raw": "## SinteticoXL Bias:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/lucianosb/sinteticoXL-bias",
"href": null,
"resource": {
"type": "space",
"id": "lucianosb/sinteticoXL-bias",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/lucianosb/sinteticoXL-bias",
"code": null,
"user": null,
"label": null,
"lang": null
}
] | The community Journalists on HuggingFace recently launched a tool (https://huggingface.co/spaces/JournalistsonHF/text-to-image-bias) to compare biases across several text-to-image models. I forked my own to evaluate the SDXL models I made.
## SinteticoXL Bias:
https://huggingface.co/spaces/lucianosb/sinteticoXL-bias | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/1663599585288-noauth.png",
"fullname": "Luciano Santa Brígida",
"name": "lucianosb",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": 28,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/632883edb0910efc277f0f6b/HdhBloHrMJpPxowyQS7Uw.png"
}
] | [] | [
{
"reaction": "🔥",
"users": [
"fdaudens",
"evijit",
"reach-vb",
"yjernite"
],
"count": 4
}
] | 2024-07-16T13:21:01.000Z | 2024-07-16T17:28:56.217Z | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/647f36a8454af0237bd49574/jshkqBUTY-GZL8As8y6Aq.jpeg",
"fullname": "Florent Daudens",
"name": "fdaudens",
"type": "user",
"isPro": false,
"isHf": true,
"isMod": false,
"followerCount": 384,
"isFollowing": false
}
] | /posts/lucianosb/799720029968177 | 1,598 | 1 |
737813976382134 | [
{
"type": "text",
"value": "🔴⭐ New addition to the existing concept space! 🔴⭐",
"raw": "🔴⭐ New addition to the existing concept space! 🔴⭐",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏞️ Space: ",
"raw": "🏞️ Space: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K",
"href": null,
"resource": {
"type": "space",
"id": "prithivMLmods/IMAGINEO-4K",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 Tried the Duotone Canvas with the image generator. Unlike the duotone filter in the Canva app, which applies hue and tints in RGBA, this feature applies duotones based purely on the provided prompt to personalize the generated image.",
"raw": "🚀 Tried the Duotone Canvas with the image generator. Unlike the duotone filter in the Canva app, which applies hue and tints in RGBA, this feature applies duotones based purely on the provided prompt to personalize the generated image.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 These tones also work with the gridding option, which already exists in the space.",
"raw": "🚀 These tones also work with the gridding option, which already exists in the space.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀 The application of tones depends on the quality and detail of the prompt given. The palette may be distorted in some cases.",
"raw": "🚀 The application of tones depends on the quality and detail of the prompt given. The palette may be distorted in some cases.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🚀It doesn't apply like a hue or tint in RGBA (as shown in canva app below); it is purely based on the prompts passed.",
"raw": "🚀It doesn't apply like a hue or tint in RGBA (as shown in canva app below); it is purely based on the prompts passed.",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏞️ Check out the space: ",
"raw": "🏞️ Check out the space: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "resource",
"value": null,
"raw": "https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K",
"href": null,
"resource": {
"type": "space",
"id": "prithivMLmods/IMAGINEO-4K",
"discussionNum": null
},
"url": "https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K",
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": " ",
"raw": " ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏜️Collection: ",
"raw": "🏜️Collection: ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "link",
"value": null,
"raw": "https://huggingface.co/collections/prithivMLmods/collection-zero-65e48a7dd8212873836ceca2",
"href": "https://huggingface.co/collections/prithivMLmods/collection-zero-65e48a7dd8212873836ceca2",
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "code_fence",
"value": null,
"raw": "```\nhuggingface.co/spaces/prithivMLmods/IMAGINEO-4K\n```",
"href": null,
"resource": null,
"url": null,
"code": "huggingface.co/spaces/prithivMLmods/IMAGINEO-4K",
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "🏞️What you can do with this space:",
"raw": "🏞️What you can do with this space:",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ Compose Image Grid",
"raw": "✅ Compose Image Grid",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "👉🏻 \"2x1\", \"1x2\", \"2x2\", \"2x3\", \"3x2\", \"1x1\"",
"raw": "👉🏻 \"2x1\", \"1x2\", \"2x2\", \"2x3\", \"3x2\", \"1x1\"",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ Apply styles",
"raw": "✅ Apply styles",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ Set up Image tones",
"raw": "✅ Set up Image tones",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "✅ Apply filters & adjust quality",
"raw": "✅ Apply filters & adjust quality",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": ".",
"raw": ".",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "Thanks for reading!",
"raw": "Thanks for reading!",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "new_line",
"value": null,
"raw": "\n",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "text",
"value": "- ",
"raw": "- ",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": null,
"label": null,
"lang": null
},
{
"type": "mention",
"value": null,
"raw": "@prithivMLmods",
"href": null,
"resource": null,
"url": null,
"code": null,
"user": "prithivMLmods",
"label": null,
"lang": null
}
] | 🔴⭐ New addition to the existing concept space! 🔴⭐
🏞️ Space: https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K
🚀 Tried the Duotone Canvas with the image generator. Unlike the duotone filter in the Canva app, which applies hue and tints in RGBA, this feature applies duotones based purely on the provided prompt to personalize the generated image.
🚀 These tones also work with the gridding option, which already exists in the space.
🚀 The application of tones depends on the quality and detail of the prompt given. The palette may be distorted in some cases.
🚀It doesn't apply like a hue or tint in RGBA (as shown in canva app below); it is purely based on the prompts passed.
🏞️ Check out the space: https://huggingface.co/spaces/prithivMLmods/IMAGINEO-4K
🏜️Collection: https://huggingface.co/collections/prithivMLmods/collection-zero-65e48a7dd8212873836ceca2
```
huggingface.co/spaces/prithivMLmods/IMAGINEO-4K
```
🏞️What you can do with this space:
✅ Compose Image Grid
👉🏻 "2x1", "1x2", "2x2", "2x3", "3x2", "1x1"
✅ Apply styles
✅ Set up Image tones
✅ Apply filters & adjust quality
.
.
.
Thanks for reading!
- @prithivMLmods | {
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 393,
"isFollowing": false
} | [
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/ILU305r70lSdmAk84q2Qx.png"
},
{
"type": "video",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/RuEfpRplU32zeoqMwwC85.mp4"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/6ci2TXUcoFaZHM6YxT0OT.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/jXfyntaOBIYAlypt93Efh.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/FQAOWkicaE-s--kuAk4gy.png"
},
{
"type": "image",
"url": "https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/ACZgwTbmQ_T79YYLizb1T.png"
}
] | [
{
"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/65bb837dbfb878f46c77de4c/UVtVbF_3rdt0DC8xTkpL1.jpeg",
"fullname": "Prithiv Sakthi",
"name": "prithivMLmods",
"type": "user",
"isPro": true,
"isHf": false,
"isMod": false,
"followerCount": 393
}
] | [
{
"reaction": "❤️",
"users": [
"reach-vb",
"KingNish",
"GPT007",
"prithivMLmods",
"NanyTVZ19",
"Ngrthm"
],
"count": 6
},
{
"reaction": "👍",
"users": [
"BishalRD",
"prithivMLmods",
"reach-vb"
],
"count": 3
},
{
"reaction": "🚀",
"users": [
"prithivMLmods",
"reach-vb"
],
"count": 2
},
{
"reaction": "🔥",
"users": [
"prithivMLmods",
"Ngrthm"
],
"count": 2
},
{
"reaction": "➕",
"users": [
"prithivMLmods"
],
"count": 1
}
] | 2024-07-15T23:47:45.000Z | 2024-08-06T09:59:54.161Z | [
{
"avatarUrl": "/avatars/43b282d651d6b903bc2756e48034ee7a.svg",
"fullname": "Samurai",
"name": "The-Last-Samurai",
"type": "user",
"isPro": false,
"isHf": false,
"isMod": false,
"followerCount": null,
"isFollowing": false
}
] | /posts/prithivMLmods/737813976382134 | 3,431 | 1 |