Remostart committed
Commit 5251795 · verified · 1 Parent(s): cd4b44a

Update app.py

Files changed (1):
  1. app.py (+69 -26)
app.py CHANGED
@@ -1,44 +1,87 @@
 import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from spaces import GPU  # Import for GPU decorator
+from spaces import GPU
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 # Load model & tokenizer
 MODEL_NAME = "ubiodee/Test_Plutus"
 
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
-model.eval()
+try:
+    logger.info("Loading tokenizer with use_fast=False...")
+    tokenizer = AutoTokenizer.from_pretrained(
+        MODEL_NAME,
+        use_fast=False,  # Use slow tokenizer to avoid fast tokenizer errors
+        use_safetensors=True,
+        trust_remote_code=True,  # Allow custom tokenizer code
+    )
+    logger.info("Tokenizer loaded successfully.")
+except Exception as e:
+    logger.error(f"Tokenizer loading failed: {str(e)}")
+    raise
+
+try:
+    logger.info("Loading model with 8-bit quantization...")
+    model = AutoModelForCausalLM.from_pretrained(
+        MODEL_NAME,
+        device_map="auto",  # Automatically map to GPU/CPU
+        load_in_8bit=True,  # Use 8-bit quantization to match model
+        torch_dtype=torch.bfloat16,  # Use bfloat16 for efficiency
+        use_safetensors=True,
+        low_cpu_mem_usage=True,  # Reduce CPU memory during loading
+        trust_remote_code=True,  # Allow custom model code
+    )
+    model.eval()
+    logger.info("Model loaded successfully.")
+except Exception as e:
+    logger.error(f"Model loading failed: {str(e)}")
+    raise
 
 # Set pad token if not defined
 if tokenizer.pad_token_id is None:
     tokenizer.pad_token_id = tokenizer.eos_token_id
+    logger.info("Set pad_token_id to eos_token_id.")
 
+# Move model to GPU if available
 if torch.cuda.is_available():
     model.to("cuda")
+    logger.info("Model moved to GPU.")
+else:
+    logger.warning("No GPU available, using CPU.")
 
 # Response function with GPU decorator
 @spaces.GPU
-def generate_response(prompt):
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-    with torch.no_grad():
-        outputs = model.generate(
-            **inputs,
-            max_new_tokens=200,
-            temperature=0.7,
-            top_p=0.9,
-            do_sample=True,
-            eos_token_id=tokenizer.eos_token_id,
-            pad_token_id=tokenizer.pad_token_id,
-        )
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Remove the prompt from the output to return only the answer
-    if response.startswith(prompt):
-        response = response[len(prompt):].strip()
-
-    return response
+def generate_response(prompt, progress=gr.Progress()):
+    progress(0.1, desc="Tokenizing input...")
+    try:
+        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+        progress(0.5, desc="Generating response...")
+        with torch.no_grad():
+            outputs = model.generate(
+                **inputs,
+                max_new_tokens=200,
+                temperature=0.7,
+                top_p=0.9,
+                do_sample=True,
+                eos_token_id=tokenizer.eos_token_id,
+                pad_token_id=tokenizer.pad_token_id,
+            )
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Remove the prompt from the output
+        if response.startswith(prompt):
+            response = response[len(prompt):].strip()
+
+        progress(1.0, desc="Done!")
+        return response
+    except Exception as e:
+        logger.error(f"Inference failed: {str(e)}")
+        return f"Error during generation: {str(e)}"
 
 # Gradio UI
 demo = gr.Interface(
@@ -49,5 +92,5 @@ demo = gr.Interface(
     description="Write Plutus smart contracts on Cardano blockchain."
 )
 
-# Launch with queueing for Spaces
-demo.queue().launch(enable_queue=True, max_threads=1)
+# Launch with queueing
+demo.queue(max_size=10).launch(enable_queue=True, max_threads=1)
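
Review note: both versions import `from spaces import GPU` but apply the decorator as `@spaces.GPU`, so the name `spaces` is never bound and the app raises a NameError as soon as the module is imported. A minimal fix sketch, assuming the Hugging Face `spaces` package that ZeroGPU Spaces provide:

import spaces  # bind the module itself so the @spaces.GPU lookup resolves

@spaces.GPU
def generate_response(prompt):
    ...

Equivalently, keep `from spaces import GPU` and decorate with plain `@GPU`.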
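
Review note: `device_map="auto"` already places the quantized weights, and transformers rejects `.to()` on bitsandbytes 8-bit models, so the later `model.to("cuda")` branch would raise a ValueError on GPU hardware. Recent transformers releases also route 8-bit loading through `BitsAndBytesConfig` rather than the deprecated bare `load_in_8bit=True` kwarg, and `use_safetensors` is a model-loading option with no effect on a tokenizer. A sketch of the load without the redundant move, assuming `bitsandbytes` is installed and a CUDA device is available:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

MODEL_NAME = "ubiodee/Test_Plutus"

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",       # places weights; no model.to("cuda") afterwards
    low_cpu_mem_usage=True,  # reduce peak CPU memory while loading
    trust_remote_code=True,
)
model.eval()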
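
Review note: stripping the prompt with `response.startswith(prompt)` silently fails whenever decoding normalizes whitespace or special tokens, leaving the prompt glued to the answer. Decoding only the newly generated token ids avoids the string comparison entirely; a sketch using the same variable names as the committed function:

# inputs/outputs as produced inside generate_response()
prompt_len = inputs["input_ids"].shape[1]  # number of prompt tokens
response = tokenizer.decode(
    outputs[0][prompt_len:],               # keep only newly generated ids
    skip_special_tokens=True,
).strip()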
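
Review note: `enable_queue` was a Gradio 3 `launch()` flag; it is redundant once `.queue()` is called, and Gradio 4 removed the keyword, so on current Gradio this launch line would fail with an unexpected-argument error. A version-safe sketch:

# .queue() alone enables queueing; max_size bounds waiting requests
demo.queue(max_size=10).launch(max_threads=1)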