husseinhug321 committed
Commit b7708d6 · verified · 1 Parent(s): 28d4dca

Update llms.py

Files changed (1):
  1. llms.py +5 -53
llms.py CHANGED
@@ -1,59 +1,11 @@
+import os
 from dotenv import load_dotenv
-import logging
 
-import torch
-from transformers import (
-    AutoModelForCausalLM,
-    AutoTokenizer,
-    pipeline,
-)
-
-from langchain_huggingface import HuggingFacePipeline
-from langchain.globals import set_debug
-from langchain.globals import set_verbose
-
-from config import HF_MODEL_ID
-from config import LLM_VERBOSE
+from langchain_openai import ChatOpenAI
 
-set_verbose(LLM_VERBOSE)
-set_debug(LLM_VERBOSE)
-
-logger = logging.getLogger(__name__)
 load_dotenv()
 
-cuda_check = torch.cuda.is_available()
-logger.info(f"torch.cuda.is_available : {cuda_check}")
-print(f"> torch.cuda.is_available : {cuda_check}")
-
-# Load Llama3 model and tokenizer
-model_id = HF_MODEL_ID
-
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-
-# BitsAndBytesConfig int-4 config
-# device_map = {"": 0}
-device_map = "auto"
-compute_dtype = getattr(torch, "float16")
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    device_map=device_map,
-    # attn_implementation="flash_attention_2",
-    # quantization_config=bnb_config,
-)
-
-model.generation_config.pad_token_id = tokenizer.eos_token_id
-
-pipe = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    max_new_tokens=50,
-    return_full_text=False,
-    num_return_sequences=1,
-    eos_token_id=tokenizer.eos_token_id,
-    temperature=0.0001,
-    do_sample=True,
+llm = ChatOpenAI(
+    model="gpt-4o-mini",
+    temperature=0,
 )
-
-llm = HuggingFacePipeline(pipeline=pipe)
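
A note on the change, not part of the commit itself: both the removed HuggingFacePipeline and the new ChatOpenAI are LangChain runnables, so callers keep the same llm.invoke(...) entry point, but the return type shifts from a plain string to an AIMessage. Below is a minimal usage sketch under that assumption; the prompt and the .env contents (an OPENAI_API_KEY entry) are hypothetical, not taken from the repository.

# Usage sketch, not part of the commit. Assumes the .env file read by
# load_dotenv() defines OPENAI_API_KEY, which langchain_openai's
# ChatOpenAI picks up from the environment; the prompt is hypothetical.
from llms import llm

result = llm.invoke("Hello!")
# HuggingFacePipeline.invoke returned a plain string; ChatOpenAI.invoke
# returns an AIMessage, so downstream code now reads result.content.
print(result.content)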