Add Ramikan-BR/tinyllama-coder-py-v12 to eval queue
Ramikan-BR/tinyllama-coder-py-v12_eval_request_False_None_16bit_float16_float16.json
ADDED
@@ -0,0 +1 @@
+{"model": "Ramikan-BR/tinyllama-coder-py-v12", "revision": "abd0469", "private": false, "params": 2.2, "architectures": "LlamaForCausalLM", "quant_type": null, "precision": "16bit", "model_params": 1.1, "model_size": 2.2, "weight_dtype": "float16", "compute_dtype": "float16", "gguf_ftype": "*Q4_0.gguf", "hardware": "gpu", "status": "Pending", "submitted_time": "2024-05-28T21:46:32Z", "model_type": "original", "job_id": -1, "job_start_time": null, "scripts": "ITREX"}
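For reference, the added file is a single-line JSON eval request. The sketch below is a minimal, illustrative way to load it and inspect the fields it carries, assuming a local checkout of the eval-queue repo; the checks shown are assumptions for illustration, not the leaderboard backend's actual validation logic.

```python
import json

# Path of the file added in this commit, relative to the eval-queue repo root.
path = "Ramikan-BR/tinyllama-coder-py-v12_eval_request_False_None_16bit_float16_float16.json"

with open(path) as f:
    request = json.load(f)

# Fields present in the added request (checks are illustrative assumptions).
assert request["status"] == "Pending"                    # queued, not yet evaluated
assert request["precision"] == "16bit"
assert request["weight_dtype"] == "float16"
assert request["compute_dtype"] == "float16"
assert request["quant_type"] is None                     # no quantization scheme requested

print(f"Queued {request['model']} @ {request['revision']} ({request['precision']})")
```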