brendon-ai committed
Commit 14b4fb4 · verified · 1 Parent(s): 2244fcd

Update src/RAGSample.py

Files changed (1):
  1. src/RAGSample.py +11 -12
src/RAGSample.py CHANGED
@@ -387,18 +387,17 @@ Answer:
 
 # Initialize a local Hugging Face model
 hf_pipeline = pipeline(
-    "text-generation",
-    model="m42-health/Llama3-Med42-8B",
-    tokenizer="m42-health/Llama3-Med42-8B",
-    max_new_tokens=150,
-    max_length=2048,  # Llama3 supports longer context
-    temperature=0.3,
-    device_map="auto",
-    return_full_text=False,
-    truncation=True,
-    do_sample=True,
-    pad_token_id=128001,  # Llama3 pad token
-    eos_token_id=128009,  # Llama3 EOS token
+    "text-generation",
+    model="Narrativaai/BioGPT-Large-finetuned-chatdoctor",
+    tokenizer="Narrativaai/BioGPT-Large-finetuned-chatdoctor",
+    max_new_tokens=100,
+    max_length=512,
+    temperature=0.3,  # Lower temp for medical accuracy
+    device="cpu",
+    return_full_text=False,
+    truncation=True,
+    do_sample=True,
+    torch_dtype="float32",
 )
 
 # Wrap it in LangChain
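
The hunk ends just before the LangChain wrapping step. Below is a minimal sketch of how the updated BioGPT pipeline could be wrapped for use in the RAG chain, assuming RAGSample.py uses the langchain-huggingface integration; the HuggingFacePipeline wrapper, the import paths, and the example query are assumptions and do not appear in this commit.

# Minimal sketch, not part of the commit: wrapping the updated pipeline in LangChain.
# Assumes the langchain-huggingface package; the query string is illustrative only.
from transformers import pipeline
from langchain_huggingface import HuggingFacePipeline

hf_pipeline = pipeline(
    "text-generation",
    model="Narrativaai/BioGPT-Large-finetuned-chatdoctor",
    tokenizer="Narrativaai/BioGPT-Large-finetuned-chatdoctor",
    max_new_tokens=100,
    max_length=512,
    temperature=0.3,  # Lower temp for medical accuracy
    device="cpu",
    return_full_text=False,
    truncation=True,
    do_sample=True,
    torch_dtype="float32",
)

# Wrap it in LangChain so it can serve as the LLM of the RAG chain
llm = HuggingFacePipeline(pipeline=hf_pipeline)

# Illustrative call; in RAGSample.py the prompt would come from the retrieval step
print(llm.invoke("What are common symptoms of iron-deficiency anemia?"))

Swapping the 8B Llama3-Med42 checkpoint for BioGPT-Large with device="cpu" and float32 presumably trades generation quality for a much smaller memory footprint, which would also explain the drop from max_length=2048 to 512.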