{
  "header": {
    "licensing": "Creative Commons 4.0",
    "formatVersion": "0.1",
    "reportId": "2f4643f7-68b5-4fb6-21f0-b5dcda04897d",
    "reportDatetime": "2025-02-26 16:57:00",
    "reportStatus": "draft",
    "publisher": {
      "name": "sopra steria",
      "confidentialityLevel": "public"
    }
  },
  "task": {
    "taskStage": "inference",
    "taskFamily": "chatbot",
    "nbRequest": 1,
    "algorithms": [
      {
        "algorithmType": "llm",
        "foundationModelName": "llama2-13b",
        "foundationModelUri": "https://huggingface.co/meta-llama/Llama-2-13b-hf",
        "framework": "vllm",
        "parametersNumber": 13,
        "quantization": "q16"
      }
    ],
    "dataset": [
      {
        "dataUsage": "input",
        "dataType": "token",
        "dataQuantity": 11
      },
      {
        "dataUsage": "output",
        "dataType": "token",
        "dataQuantity": 828
      }
    ],
    "estimatedAccuracy": "veryGood"
  },
  "measures": [
    {
      "measurementMethod": "codecarbon",
      "version": "2.5.0",
      "cpuTrackingMode": "constant",
      "gpuTrackingMode": "nvml",
      "powerConsumption": 0.00267074,
      "measurementDuration": 19.09390426,
      "measurementDateTime": "2024-09-30 09:09:40"
    }
  ],
  "system": {
    "os": "linux"
  },
  "software": {
    "language": "python",
    "version": "3.10.12"
  },
  "infrastructure": {
    "infraType": "publicCloud",
    "cloudProvider": "ovh",
    "components": [
      {
        "componentName": "Intel(R) Xeon(R) Gold 6226R CPU @ 2.90GHz",
        "componentType": "cpu",
        "nbComponent": 30,
        "manufacturer": "Intel",
        "family": "Xeon",
        "series": "Gold 6226R"
      },
      {
        "componentName": "2 x Tesla V100S-PCIE-32GB",
        "componentType": "gpu",
        "nbComponent": 2,
        "memorySize": 32,
        "manufacturer": "Tesla",
        "family": "V100"
      },
      {
        "componentType": "ram",
        "nbComponent": 1,
        "memorySize": 86
      }
    ]
  },
  "environment": {
    "country": "france",
    "powerSupplierType": "public"
  },
  "quality": "high"
}