{
"bomFormat": "CycloneDX",
"specVersion": "1.6",
"serialNumber": "urn:uuid:68e31c27-69fa-4eba-98f4-af6c900705dd",
"version": 1,
"metadata": {
"timestamp": "2025-06-05T09:40:41.667208+00:00",
"component": {
"type": "machine-learning-model",
"bom-ref": "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2-326049c7-7efd-54be-8c7a-90827ace56c1",
"name": "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2",
"externalReferences": [
{
"url": "https://huggingface.co/Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2",
"type": "documentation"
}
],
"modelCard": {
"modelParameters": {
"task": "text-generation",
"architectureFamily": "llama",
"modelArchitecture": "LlamaForCausalLM"
},
"properties": [
{
"name": "library_name",
"value": "transformers"
}
],
"quantitativeAnalysis": {
"performanceMetrics": [
{
"slice": "dataset: HuggingFaceH4/ifeval",
"type": "inst_level_strict_acc and prompt_level_strict_acc",
"value": 77.92
},
{
"slice": "dataset: BBH",
"type": "acc_norm",
"value": 29.69
},
{
"slice": "dataset: hendrycks/competition_math",
"type": "exact_match",
"value": 16.92
},
{
"slice": "dataset: Idavidrein/gpqa",
"type": "acc_norm",
"value": 4.36
},
{
"slice": "dataset: TAUR-Lab/MuSR",
"type": "acc_norm",
"value": 7.77
},
{
"slice": "dataset: TIGER-Lab/MMLU-Pro, split: test, config: main",
"type": "acc",
"value": 30.9
}
]
}
},
"authors": [
{
"name": "Orenguteng"
}
],
"licenses": [
{
"license": {
"name": "llama3.1"
}
}
],
"tags": [
"transformers",
"safetensors",
"llama",
"text-generation",
"conversational",
"license:llama3.1",
"model-index",
"autotrain_compatible",
"text-generation-inference",
"endpoints_compatible",
"region:us"
]
}
}
}