prince-canuma committed on
Commit
8123774
·
verified ·
1 Parent(s): 317ff41

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +14 -14
README.md CHANGED
@@ -1,16 +1,16 @@
1
- ---
2
- license: gemma
3
- tags:
4
- - mlx
5
- library_name: mlx
6
- pipeline_tag: text-generation
7
- base_model: gg-hf-gm/gemma-3-270m-it
8
- ---
9
-
10
- # google/gemma-3-270m-it-bf16
11
-
12
- This model [google/gemma-3-270m-it-bf16](https://huggingface.co/google/gemma-3-270m-it-bf16) was
13
- converted to MLX format from [gg-hf-gm/gemma-3-270m-it](https://huggingface.co/gg-hf-gm/gemma-3-270m-it)
14
  using mlx-lm version **0.26.3**.
15
 
16
  ## Use with mlx
@@ -22,7 +22,7 @@ pip install mlx-lm
22
  ```python
23
  from mlx_lm import load, generate
24
 
25
- model, tokenizer = load("google/gemma-3-270m-it-bf16")
26
 
27
  prompt = "hello"
28
 
 
1
+ ---
2
+ license: gemma
3
+ tags:
4
+ - mlx
5
+ library_name: mlx
6
+ pipeline_tag: text-generation
7
+ base_model: google/gemma-3-270m-it
8
+ ---
9
+
10
+ # mlx-community/gemma-3-270m-it-bf16
11
+
12
+ This model [mlx-community/gemma-3-270m-it-bf16](https://huggingface.co/mlx-community/gemma-3-270m-it-bf16) was
13
+ converted to MLX format from [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it)
14
  using mlx-lm version **0.26.3**.
15
 
16
  ## Use with mlx
 
22
  ```python
23
  from mlx_lm import load, generate
24
 
25
+ model, tokenizer = load("mlx-community/gemma-3-270m-it-bf16")
26
 
27
  prompt = "hello"
28