Jeong-hun Kim committed on
Commit
8aea77f
·
1 Parent(s): 2d828c3

main -> huggingface release build

Browse files
Files changed (2) hide show
  1. app/main.py → app.py +5 -5
  2. howToStart.txt +3 -0
app/main.py → app.py RENAMED
@@ -1,18 +1,18 @@
1
- from fastapi import FastAPI
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
  import torch
5
  import re
6
-
7
- app = FastAPI()
8
 
9
  print("[torch] is available:", torch.cuda.is_available())
10
  print("[device] default:", torch.device("cuda" if torch.cuda.is_available() else "cpu"))
11
 
12
  # 모델 로드
13
  model_id = "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-1.5B"
14
- with open("token.txt", "r") as f:
15
- access_token = f.read().strip()
 
 
16
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
17
  model = AutoModelForCausalLM.from_pretrained(
18
  model_id,
 
 
1
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
  import torch
4
  import re
5
+ import os
 
6
 
7
  print("[torch] is available:", torch.cuda.is_available())
8
  print("[device] default:", torch.device("cuda" if torch.cuda.is_available() else "cpu"))
9
 
10
  # 모델 로드
11
  model_id = "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-1.5B"
12
+
13
+ # 허깅 페이스 secret에 등록된 토큰 로드
14
+ access_token = os.environ.get("HF_TOKEN")
15
+
16
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
17
  model = AutoModelForCausalLM.from_pretrained(
18
  model_id,
howToStart.txt CHANGED
@@ -10,5 +10,8 @@ venv\Scripts\activate
10
  3. install requirements
11
  pip install -r requirements.txt
12
 
 
 
 
13
  4. start server
14
  python app\main.py
 
10
  3. install requirements
11
  pip install -r requirements.txt
12
 
13
+ 3.2. receive Hugging Face token
14
+ and set it as the `HF_TOKEN` environment variable (registered as a Hugging Face Space secret; the app reads it via `os.environ.get("HF_TOKEN")`)
15
+
16
  4. start server
17
  python app\main.py