Commit fd4c28d · Parent(s): 9e7be30
Update app_v3.py
app_v3.py CHANGED
@@ -3,6 +3,7 @@ from transformers import AutoTokenizer, TextStreamer, pipeline
 from auto_gptq import AutoGPTQForCausalLM
 from huggingface_hub import snapshot_download
 import os
+import gc
 
 # Define pretrained and quantized model directories
 pretrained_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
@@ -22,6 +23,10 @@ model_basename = "Jackson2-4bit-128g-GPTQ"
 
 #os.environ['CUDA_VISIBLE_DEVICES'] = '0'
 
+# Before allocating or loading the model, clear up memory
+gc.collect()
+torch.cuda.empty_cache()
+
 use_triton = False
 
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True, legacy=False)
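The change itself is a common pattern: clear Python's garbage and PyTorch's CUDA allocator cache right before a large allocation, here the GPTQ model load. gc.collect() runs first so that lingering, unreferenced tensors are actually destroyed; torch.cuda.empty_cache() then returns the cached blocks they occupied to the driver. Note that the diff calls torch.cuda.empty_cache() without adding an import torch line, so it relies on torch already being importable elsewhere in app_v3.py. A minimal standalone sketch of the same pattern (free_gpu_memory is a hypothetical helper name, not part of the app):

import gc

import torch

def free_gpu_memory() -> None:
    # Drop unreachable Python objects so any CUDA tensors they hold are freed.
    gc.collect()
    if torch.cuda.is_available():
        # Hand cached-but-unused allocator blocks back to the CUDA driver.
        torch.cuda.empty_cache()

# Call immediately before the big allocation, e.g.:
#   free_gpu_memory()
#   model = AutoGPTQForCausalLM.from_quantized(model_name_or_path, ...)

One caveat: empty_cache() cannot free memory that live objects still reference, so it only helps when the preceding gc.collect() (or an explicit del) has actually released whatever previously held the GPU memory.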