prince-canuma committed on
Commit
1fcd0f0
·
verified ·
1 Parent(s): b3e953a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +14 -14
README.md CHANGED
@@ -1,16 +1,16 @@
1
- ---
2
- license: gemma
3
- library_name: mlx
4
- base_model: gg-hf-gm/gemma-3-270m
5
- tags:
6
- - mlx
7
- pipeline_tag: text-generation
8
- ---
9
-
10
- # google/gemma-3-270m-8bit
11
-
12
- This model [google/gemma-3-270m-8bit](https://huggingface.co/google/gemma-3-270m-8bit) was
13
- converted to MLX format from [gg-hf-gm/gemma-3-270m](https://huggingface.co/gg-hf-gm/gemma-3-270m)
14
  using mlx-lm version **0.26.3**.
15
 
16
  ## Use with mlx
@@ -22,7 +22,7 @@ pip install mlx-lm
22
  ```python
23
  from mlx_lm import load, generate
24
 
25
- model, tokenizer = load("google/gemma-3-270m-8bit")
26
 
27
  prompt = "hello"
28
 
 
1
+ ---
2
+ license: gemma
3
+ base_model: google/gemma-3-270m
4
+ tags:
5
+ - mlx
6
+ library_name: mlx
7
+ pipeline_tag: text-generation
8
+ ---
9
+
10
+ # mlx-community/gemma-3-270m-8bit
11
+
12
+ This model [mlx-community/gemma-3-270m-8bit](https://huggingface.co/mlx-community/gemma-3-270m-8bit) was
13
+ converted to MLX format from [google/gemma-3-270m](https://huggingface.co/google/gemma-3-270m)
14
  using mlx-lm version **0.26.3**.
15
 
16
  ## Use with mlx
 
22
  ```python
23
  from mlx_lm import load, generate
24
 
25
+ model, tokenizer = load("mlx-community/gemma-3-270m-8bit")
26
 
27
  prompt = "hello"
28