prince-canuma committed on
Commit
fc34924
·
verified ·
1 Parent(s): 74a687e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +14 -14
README.md CHANGED
@@ -1,16 +1,16 @@
1
- ---
2
- license: gemma
3
- base_model: gg-hf-gm/gemma-3-270m-it
4
- library_name: mlx
5
- pipeline_tag: text-generation
6
- tags:
7
- - mlx
8
- ---
9
-
10
- # google/gemma-3-270m-it-6bit
11
-
12
- This model [google/gemma-3-270m-it-6bit](https://huggingface.co/google/gemma-3-270m-it-6bit) was
13
- converted to MLX format from [gg-hf-gm/gemma-3-270m-it](https://huggingface.co/gg-hf-gm/gemma-3-270m-it)
14
  using mlx-lm version **0.26.3**.
15
 
16
  ## Use with mlx
@@ -22,7 +22,7 @@ pip install mlx-lm
22
  ```python
23
  from mlx_lm import load, generate
24
 
25
- model, tokenizer = load("google/gemma-3-270m-it-6bit")
26
 
27
  prompt = "hello"
28
 
 
1
+ ---
2
+ license: gemma
3
+ base_model: google/gemma-3-270m-it
4
+ library_name: mlx
5
+ pipeline_tag: text-generation
6
+ tags:
7
+ - mlx
8
+ ---
9
+
10
+ # mlx-community/gemma-3-270m-it-6bit
11
+
12
+ This model [mlx-community/gemma-3-270m-it-6bit](https://huggingface.co/mlx-community/gemma-3-270m-it-6bit) was
13
+ converted to MLX format from [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it)
14
  using mlx-lm version **0.26.3**.
15
 
16
  ## Use with mlx
 
22
  ```python
23
  from mlx_lm import load, generate
24
 
25
+ model, tokenizer = load("mlx-community/gemma-3-270m-it-6bit")
26
 
27
  prompt = "hello"
28