maeshay committed on
Commit
282e4e5
·
verified ·
1 Parent(s): 7c8ac60

Update core/make_pipeline.py

Browse files
Files changed (1) hide show
  1. core/make_pipeline.py +12 -4
core/make_pipeline.py CHANGED
@@ -1,11 +1,19 @@
1
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  import os
4
- import spaces
 
 
 
 
 
 
 
 
5
 
6
  class MakePipeline:
7
  # 모델명
8
- MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.2"
9
 
10
  # 변수초기화
11
  # model_id
@@ -24,7 +32,7 @@ class MakePipeline:
24
  "repetition_penalty": 1.05,
25
  "max_new_tokens": 96
26
  }
27
-
28
  # 모델 불러오기
29
  @spaces.GPU
30
  def build(self, type: str):
@@ -50,7 +58,6 @@ class MakePipeline:
50
  "text-generation",
51
  model=model,
52
  tokenizer=tokenizer,
53
- torch_dtype=torch.float16
54
  )
55
 
56
  else:
@@ -82,6 +89,7 @@ class MakePipeline:
82
  temperature=self.config["temperature"],
83
  top_p=self.config["top_p"],
84
  repetition_penalty=self.config["repetition_penalty"],
 
85
  return_full_text=True
86
  )
87
  return outputs[0]["generated_text"]
 
1
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
2
  import torch
3
  import os
4
+
5
+ try:
6
+ import spaces
7
+ except ImportError:
8
+ class DummyDecorator:
9
+ @staticmethod
10
+ def GPU(func):
11
+ return func
12
+ spaces = DummyDecorator()
13
 
14
  class MakePipeline:
15
  # 모델명
16
+ MODEL_ID = "naver-hyperclovax/HyperCLOVAX-SEED-Vision-Instruct-3B"
17
 
18
  # 변수초기화
19
  # model_id
 
32
  "repetition_penalty": 1.05,
33
  "max_new_tokens": 96
34
  }
35
+
36
  # 모델 불러오기
37
  @spaces.GPU
38
  def build(self, type: str):
 
58
  "text-generation",
59
  model=model,
60
  tokenizer=tokenizer,
 
61
  )
62
 
63
  else:
 
89
  temperature=self.config["temperature"],
90
  top_p=self.config["top_p"],
91
  repetition_penalty=self.config["repetition_penalty"],
92
+ eos_token_id=self.tokenizer.eos_token_id,
93
  return_full_text=True
94
  )
95
  return outputs[0]["generated_text"]