Gemma2-LB-mT5 / config.json
{
"alignments": "linear",
"architectures": [
"LangBridgeModel"
],
"dim_enc": 2048,
"dim_lm": 3584,
"enc": "DKYoon/mt5-xl-lm-adapt",
"freeze_encoder": true,
"freeze_language_model": true,
"lm": "google/gemma-2-9b-it",
"torch_dtype": "bfloat16",
"transformers_version": "4.46.2"
}
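
The fields above describe a LangBridge-style pairing: a multilingual mT5-XL encoder (hidden size 2048, "dim_enc") bridged into Gemma-2-9B-it (hidden size 3584, "dim_lm") through a linear alignment, with both backbones frozen. The following is a minimal, illustrative Python sketch of what those fields refer to, not the actual LangBridge implementation; it assumes the config file is saved locally as config.json and uses standard transformers classes, with the nn.Linear layer standing in schematically for the trained alignment module.

# Illustrative sketch only; not the official LangBridgeModel code.
import json

import torch
from transformers import AutoModelForCausalLM, MT5EncoderModel

with open("config.json") as f:
    cfg = json.load(f)

# Multilingual encoder; its hidden size (2048) matches "dim_enc".
encoder = MT5EncoderModel.from_pretrained(cfg["enc"], torch_dtype=torch.bfloat16)

# Target language model; its hidden size (3584) matches "dim_lm".
lm = AutoModelForCausalLM.from_pretrained(cfg["lm"], torch_dtype=torch.bfloat16)

# "alignments": "linear" corresponds to a single linear projection that maps
# encoder states into the LM's embedding space. With "freeze_encoder" and
# "freeze_language_model" both true, only this alignment would be trained.
alignment = torch.nn.Linear(cfg["dim_enc"], cfg["dim_lm"], dtype=torch.bfloat16)

At inference, text would be encoded with the mT5 encoder, projected through the alignment layer, and fed to Gemma-2 as soft prompts; the exact wiring depends on the LangBridge codebase referenced by the "LangBridgeModel" architecture name.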