Update README.md
Changed file: README.md
@@ -27,8 +27,16 @@ We introduce LiveCC, the first video LLM capable of real-time commentary, traine
 ## Training with Streaming Frame-Words Paradigm
 
 
-
 ## Quickstart
+
+### Gradio Demo
+
+Please refer to https://github.com/showlab/livecc:
+
+
+
+### Hands-on
+
 Like qwen-vl-utils, we offer a toolkit to help you handle various types of visual input more conveniently, **especially on video streaming inputs**. You can install it using the following command:
 
 ```bash
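
The exact `pip install` command sits outside this hunk, so it is not reproduced here. For orientation only, the sketch below shows how a video-plus-text message is typically preprocessed with `qwen-vl-utils` before calling the model; it assumes the standard Qwen2-VL flow (`process_vision_info`, `apply_chat_template`), and the video path and query are borrowed from the example later in this diff. The LiveCC-specific streaming helpers (`prepare_multiturn_multimodal_inputs_for_generation`, the streaming EOS handling) ship with the `livecc` repository and are not redefined here.

```python
# Illustrative preprocessing sketch (standard Qwen2-VL / qwen-vl-utils usage,
# not LiveCC-specific code). The path and query are placeholders from the demo.
from transformers import AutoProcessor
from qwen_vl_utils import process_vision_info

processor = AutoProcessor.from_pretrained('chenjoya/LiveCC-7B-Base', use_fast=False)
messages = [{
    "role": "user",
    "content": [
        {"type": "video", "video": "demo/sources/howto_fix_laptop_mute_1080p.mp4", "max_pixels": 384 * 28 * 28},
        {"type": "text", "text": "Please describe the video."},
    ],
}]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt")
```
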
@@ -59,7 +67,6 @@ class LiveCCDemoInfer:
             attn_implementation='flash_attention_2'
         )
         self.processor = AutoProcessor.from_pretrained(model_path, use_fast=False)
-        self.streaming_eos_token_id = self.processor.tokenizer(' ...').input_ids[-1]
         self.model.prepare_inputs_for_generation = functools.partial(prepare_multiturn_multimodal_inputs_for_generation, self.model)
         message = {
             "role": "user",
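
The `functools.partial` line above rebinds the instance's `prepare_inputs_for_generation` hook so a custom function from the LiveCC utilities receives the model as its first argument. The stub below only illustrates that override pattern; the real body of `prepare_multiturn_multimodal_inputs_for_generation` lives in the LiveCC repo and is not reproduced.

```python
import functools

def prepare_multiturn_multimodal_inputs_for_generation(model, *args, **kwargs):
    # Stub for illustration only: delegate to the stock implementation, which is
    # where a real override would keep multi-turn multimodal inputs aligned with
    # the growing text sequence across generation steps.
    return type(model).prepare_inputs_for_generation(model, *args, **kwargs)

# partial() pre-fills `model`, so generate() can call the replacement exactly
# like the bound method it shadows:
#   model.prepare_inputs_for_generation = functools.partial(
#       prepare_multiturn_multimodal_inputs_for_generation, model)
```
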
@@ -71,7 +78,7 @@ class LiveCCDemoInfer:
         self.system_prompt_offset = texts.index('<|im_start|>user')
         self._cached_video_readers_with_hw = {}
 
-
+
     def live_cc(
         self,
         query: str,
@@ -80,8 +87,6 @@ class LiveCCDemoInfer:
         default_query: str = 'Please describe the video.',
         do_sample: bool = False,
         repetition_penalty: float = 1.05,
-        streaming_eos_base_threshold: float = None,
-        streaming_eos_threshold_step: float = None,
         **kwargs,
     ):
         """
@@ -92,6 +97,8 @@ class LiveCCDemoInfer:
             last_video_pts_index: int, last processed video frame index
             video_pts: np.ndarray, video pts
             last_history: list, last processed history
+            past_key_values: llm past_key_values
+            past_ids: past generated ids
         """
         # 1. preparation: video_reader, and last processing info
         video_timestamp, last_timestamp = state.get('video_timestamp', 0), state.get('last_timestamp', -1 / self.fps)
@@ -145,7 +152,7 @@ class LiveCCDemoInfer:
         }
         if not query and not state.get('query', None):
             query = default_query
-
+            print(f'No query provided, use default_query={default_query}')
         if query and state.get('query', None) != query:
             message['content'].append({"type": "text", "text": query})
             state['query'] = query
@@ -163,23 +170,19 @@ class LiveCCDemoInfer:
         inputs.to('cuda')
         if past_ids is not None:
             inputs['input_ids'] = torch.cat([past_ids, inputs.input_ids], dim=1)
-        if streaming_eos_base_threshold is not None:
-            logits_processor = [ThresholdLogitsProcessor(self.streaming_eos_token_id, streaming_eos_base_threshold, streaming_eos_threshold_step)]
-        else:
-            logits_processor = None
         outputs = self.model.generate(
             **inputs, past_key_values=state.get('past_key_values', None),
             return_dict_in_generate=True, do_sample=do_sample,
             repetition_penalty=repetition_penalty,
-            logits_processor=logits_processor,
         )
         state['past_key_values'] = outputs.past_key_values
         state['past_ids'] = outputs.sequences[:, :-1]
         yield (start_timestamp, stop_timestamp), self.processor.decode(outputs.sequences[0, inputs.input_ids.size(1):], skip_special_tokens=True), state
 
 model_path = 'chenjoya/LiveCC-7B-Base'
-
-
+# download a test video at: https://github.com/showlab/livecc/blob/main/demo/sources/howto_fix_laptop_mute_1080p.mp4
+video_path = "demo/sources/howto_fix_laptop_mute_1080p.mp4"
+query = "Please describe the video."
 
 infer = LiveCCDemoInfer(model_path=model_path)
 state = {'video_path': video_path}
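
The block removed above gated generation with a `ThresholdLogitsProcessor` keyed to the streaming EOS token (`' ...'`). Its real implementation ships with the LiveCC utilities; the sketch below is only one plausible reading of the idea, written against the standard `transformers.LogitsProcessor` interface: once the streaming EOS token's probability clears a (step-adjustable) threshold, it is forced so the commentary for the current chunk stops.

```python
import torch
from transformers import LogitsProcessor

class ThresholdLogitsProcessor(LogitsProcessor):
    """Illustrative sketch, not the LiveCC implementation: force the streaming
    EOS token once its probability exceeds a threshold that moves by `step`
    after every generated token."""
    def __init__(self, token_id: int, base_threshold: float, step: float):
        self.token_id = token_id
        self.base_threshold = base_threshold
        self.step = step
        self.num_calls = 0

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        threshold = self.base_threshold + self.step * self.num_calls
        self.num_calls += 1
        probs = scores.softmax(dim=-1)
        hit = probs[:, self.token_id] >= threshold
        # For rows that cleared the threshold, make the streaming EOS token the argmax.
        scores[hit] = torch.finfo(scores.dtype).min
        scores[hit, self.token_id] = 0.0
        return scores
```
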
@@ -189,7 +192,7 @@ for t in range(31):
     state['video_timestamp'] = t
     for (start_t, stop_t), response, state in infer.live_cc(
         query=query, state=state,
-        max_pixels =
+        max_pixels = 384 * 28 * 28, repetition_penalty=1.05,
         streaming_eos_base_threshold=0.0, streaming_eos_threshold_step=0
     ):
         print(f'{start_t}s-{stop_t}s: {response}')
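
`max_pixels = 384 * 28 * 28` follows the Qwen2-VL convention of budgeting vision input in 28×28 patches, so each sampled frame is capped at roughly 384 visual patches. A small back-of-the-envelope check (assuming that patching convention; the exact resize logic lives in the processor):

```python
# Rough intuition for the per-frame pixel budget (assumes Qwen2-VL's 28x28 patching).
PATCH = 28
max_pixels = 384 * PATCH * PATCH             # = 301_056 pixels per frame
max_patches = max_pixels // (PATCH * PATCH)  # = 384 patches

# A 1080p frame far exceeds the budget, so the processor downscales it first.
w, h = 1920, 1080
print(max_patches, (w // PATCH) * (h // PATCH))  # 384 2584
```
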
@@ -220,7 +223,7 @@ class LiveCCDemoInfer:
         self.model = Qwen2VLForConditionalGeneration.from_pretrained(
             model_path, torch_dtype="auto",
             device_map=device,
-            attn_implementation='
+            attn_implementation='flash_attention_2'
         )
         self.processor = AutoProcessor.from_pretrained(model_path, use_fast=False)
         self.streaming_eos_token_id = self.processor.tokenizer(' ...').input_ids[-1]
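
The hunk above pins `attn_implementation='flash_attention_2'`, which requires the `flash-attn` package and a supported GPU. If that is not available, a common workaround is to fall back to PyTorch SDPA; this is an editorial suggestion, not something the README prescribes:

```python
# Optional fallback when flash-attn is not installed (assumption: 'sdpa' is an
# acceptable, if slower, attention backend for Qwen2VLForConditionalGeneration).
import importlib.util
from transformers import Qwen2VLForConditionalGeneration

attn_impl = 'flash_attention_2' if importlib.util.find_spec('flash_attn') else 'sdpa'
model = Qwen2VLForConditionalGeneration.from_pretrained(
    'chenjoya/LiveCC-7B-Base', torch_dtype='auto',
    device_map='cuda', attn_implementation=attn_impl,
)
```
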
@@ -233,17 +236,13 @@ class LiveCCDemoInfer:
         }
         texts = self.processor.apply_chat_template([message], tokenize=False)
         self.system_prompt_offset = texts.index('<|im_start|>user')
-        self._cached_video_readers_with_hw = {}
 
-    @torch.inference_mode()
     def video_qa(
         self,
         message: str,
         state: dict,
-        history: list = [],
         do_sample: bool = False,
         repetition_penalty: float = 1.05,
-        hf_spaces: bool = False,
         **kwargs,
     ):
         """
@@ -254,18 +253,11 @@ class LiveCCDemoInfer:
             last_video_pts_index: int, last processed video frame index
             video_pts: np.ndarray, video pts
             last_history: list, last processed history
+            past_key_values: llm past_key_values
+            past_ids: past generated ids
         """
         video_path = state.get('video_path', None)
         conversation = []
-        if hf_spaces:
-            for past_message in history:
-                content = [{"type": "text", "text": past_message['content']}]
-                if video_path: # only use once
-                    content.insert(0, {"type": "video", "video": video_path})
-                    video_path = None
-                conversation.append({"role": past_message["role"], "content": content})
-        else:
-            pass # use past_key_values
         past_ids = state.get('past_ids', None)
         content = [{"type": "text", "text": message}]
         if past_ids is None and video_path: # only use once
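
The removed `hf_spaces` branch rebuilt the whole conversation from a `history` list on every turn; the kept path instead carries context through `state['past_ids']` / `state['past_key_values']`, which is why the video is attached to the content list only once (`# only use once`). The sketch below shows how turns are shaped under that design; the file path and questions mirror the example at the end of this diff.

```python
# First turn: the video rides along with the question and gets encoded once.
turn1 = {"role": "user", "content": [
    {"type": "video", "video": "demo/sources/howto_fix_laptop_mute_1080p.mp4"},
    {"type": "text", "text": "What is the video?"},
]}
# Later turns send text only; the visual tokens already live in the cached
# past_key_values, and past_ids keeps positions aligned with that cache.
turn2 = {"role": "user", "content": [
    {"type": "text", "text": "How do you know that?"},
]}
```
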
@@ -291,23 +283,27 @@ class LiveCCDemoInfer:
             repetition_penalty=repetition_penalty,
             max_new_tokens=512,
         )
-        state['past_key_values'] = outputs.past_key_values
-        state['past_ids'] = outputs.sequences[:, :-1]
+        state['past_key_values'] = outputs.past_key_values
+        state['past_ids'] = outputs.sequences[:, :-1]
         response = self.processor.decode(outputs.sequences[0, inputs.input_ids.size(1):], skip_special_tokens=True)
         return response, state
 
 model_path = 'chenjoya/LiveCC-7B-Base'
-
+# download a test video at: https://github.com/showlab/livecc/blob/main/demo/sources/howto_fix_laptop_mute_1080p.mp4
+video_path = "demo/sources/howto_fix_laptop_mute_1080p.mp4"
 
 infer = LiveCCDemoInfer(model_path=model_path)
 state = {'video_path': video_path}
 # first round
-
+query1 = 'What is the video?'
+response1, state = infer.video_qa(message=query1, state=state)
+print(f'Q1: {query1}\nA1: {response1}')
 # second round
-
+query2 = 'How do you know that?'
+response2, state = infer.video_qa(message=query2, state=state)
+print(f'Q2: {query2}\nA2: {response2}')
 ```
 
-
 ## Limitations
 
 - This model has only undergone video-ASR streaming pre-training, so it may not perform well on general video QA.