Update app.py
app.py
CHANGED
@@ -7,15 +7,8 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-DESCRIPTION = """
-# Gemma 2 2B IT
-
-Gemma 2 is Google's latest iteration of open LLMs.
-This is a demo of [`google/gemma-2-2b-it`](https://huggingface.co/google/gemma-2-2b-it), fine-tuned for instruction following.
-For more details, please check [our post](https://huggingface.co/blog/gemma2).
-
-👉 Looking for a larger and more powerful version? Try the 27B version in [HuggingChat](https://huggingface.co/chat/models/google/gemma-2-27b-it) and the 9B version in [this Space](https://huggingface.co/spaces/huggingface-projects/gemma-2-9b-it).
-"""
+DESCRIPTION = """
+"""
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
@@ -23,7 +16,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "google/gemma-2-2b-it"
+model_id = "MCES10/code-gen-gemma-2-2b-it"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
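The diff cuts off inside the `AutoModelForCausalLM.from_pretrained(` call, so the remaining loading options and the generation loop are not shown in this commit. Below is a minimal sketch of how a Space like this typically completes model loading and streams responses with `TextIteratorStreamer`; the `torch_dtype`, the chat-template call, and the sampling parameters are illustrative assumptions, not taken from this file.

```python
# Sketch of the assumed continuation (not part of this commit): load the model
# and stream generated text token-by-token via TextIteratorStreamer.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "MCES10/code-gen-gemma-2-2b-it"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype; the diff truncates before this argument
).to(device)
model.eval()


def generate(prompt: str, max_new_tokens: int = 1024):
    """Yield the response incrementally as the model generates it."""
    # Build the prompt with the tokenizer's chat template.
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(device)

    # The streamer collects tokens produced by model.generate() running in a
    # background thread and exposes them as an iterator of decoded text chunks.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
```

In a Gradio Space this generator is usually wired into a chat UI (for example `gr.ChatInterface(fn=generate)`), which is why the function yields the accumulated text rather than returning it once.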