Update README.md #21
opened by csabakecskemeti

README.md CHANGED
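The change pins the quick-start snippet to the `bytedance-research/ChatTS-14B` Hub id, spells out the `from_pretrained` calls for the model, tokenizer, and processor, and fixes the chat-template string: a plain `f"..."` literal cannot span multiple lines in Python, so it becomes a triple-quoted `f"""..."""` string. A new comment also notes that pre-Ampere GPUs (such as the V100) should pass `_attn_implementation='eager'` when loading the model.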
```diff
@@ -60,19 +60,21 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor
 import torch
 import numpy as np
 
+hf_model = "bytedance-research/ChatTS-14B"
 # Load the model, tokenizer and processor
-
-
-
+# For pre-Ampere GPUs (like V100) use `_attn_implementation='eager'`
+model = AutoModelForCausalLM.from_pretrained(hf_model, trust_remote_code=True, device_map="auto", torch_dtype='float16')
+tokenizer = AutoTokenizer.from_pretrained(hf_model, trust_remote_code=True)
+processor = AutoProcessor.from_pretrained(hf_model, trust_remote_code=True, tokenizer=tokenizer)
 # Create time series and prompts
 timeseries = np.sin(np.arange(256) / 10) * 5.0
 timeseries[100:] -= 10.0
 prompt = f"I have a time series length of 256: <ts><ts/>. Please analyze the local changes in this time series."
 # Apply Chat Template
-prompt = f"<|im_start|>system
+prompt = f"""<|im_start|>system
 You are a helpful assistant.<|im_end|><|im_start|>user
 {prompt}<|im_end|><|im_start|>assistant
-"
+"""
 # Convert to tensor
 inputs = processor(text=[prompt], timeseries=[timeseries], padding=True, return_tensors="pt")
 # Model Generate
```
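The hunk stops at the `# Model Generate` comment. Continuing the snippet above, a minimal sketch of the usual `transformers` generate-and-decode step might look as follows; the `max_new_tokens` value, the device handling, and the assumption that the processor returns standard `input_ids` are illustrative, not part of this PR:

```python
# Illustrative continuation (not part of this PR): run generation on the
# processed inputs and decode the reply. Assumes the processor returned
# standard `input_ids`; max_new_tokens is an arbitrary example value.
inputs = {k: v.to(model.device) for k, v in inputs.items()}

outputs = model.generate(**inputs, max_new_tokens=512)

# outputs[0] contains the prompt tokens followed by the new tokens; slice
# off the prompt before decoding so only the model's answer is printed.
response = tokenizer.decode(
    outputs[0][inputs["input_ids"].shape[1]:],
    skip_special_tokens=True,
)
print(response)
```

Decoding only the tokens past `input_ids.shape[1]` keeps the echoed prompt out of the printed response.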