weiweiz1 commited on
Commit
ef21565
1 Parent(s): e6f3e64

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +7 -4
README.md CHANGED
@@ -60,6 +60,11 @@ model = AutoModelForCausalLM.from_pretrained(
60
  torch_dtype='auto',
61
  device_map="auto",
62
  )
 
 
 
 
 
63
  prompt = "There is a girl who likes adventure,"
64
  messages = [
65
  {"role": "system", "content": "You are a helpful assistant."},
@@ -86,11 +91,9 @@ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_
86
  response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
87
  print(response)
88
 
89
- ##prompt = "请介绍一下阿里巴巴公司"
90
- ##阿里巴巴集团是中国最大的电子商务公司之一,由马云等18人于1999年在杭州创立。阿里巴巴集团旗下拥有多个知名的电子商务平台,包括�
91
 
92
- ##prompt = "9.8大还是9.11大"
93
- ##9.11
94
 
95
  ##prompt = "Once upon a time,"
96
  ##it seems like we're about to start a classic fairy tale. Would you like to continue the story, or would you like me to take over and spin a yarn for you?
 
60
  torch_dtype='auto',
61
  device_map="auto",
62
  )
63
+
64
+ ##import habana_frameworks.torch.core as htcore ## uncomment it for HPU
65
+ ##import habana_frameworks.torch.hpu as hthpu ## uncomment it for HPU
66
+ ##model = model.to(torch.bfloat16).to("hpu") ## uncomment it for HPU
67
+
68
  prompt = "There is a girl who likes adventure,"
69
  messages = [
70
  {"role": "system", "content": "You are a helpful assistant."},
 
91
  response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
92
  print(response)
93
 
 
 
94
 
95
+ ##prompt = "Which one is bigger, 9.11 or 9.8"
96
+ ##9.11 is bigger than 9.8.
97
 
98
  ##prompt = "Once upon a time,"
99
  ##it seems like we're about to start a classic fairy tale. Would you like to continue the story, or would you like me to take over and spin a yarn for you?