commit 3007cfde93
parent 0ad0ea5c10
Date: 2026-02-26 16:29:38 +08:00


@@ -12,124 +12,11 @@ from transformers.video_utils import VideoInput
from transformers.cache_utils import Cache
from transformers.processing_utils import Unpack
import os
import time
logger = logging.get_logger(__name__)
class myQwen3VLProcessor(Qwen3VLProcessor):
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
super().__init__(image_processor, tokenizer, video_processor, chat_template, **kwargs)
def __call__(
self,
images: ImageInput = None,
text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
videos: VideoInput = None,
**kwargs: Unpack[Qwen3VLProcessorKwargs],
) -> BatchFeature:
r"""
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of 3D image grids (temporal, height, width) as seen by the LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of 3D video grids (temporal, height, width) as seen by the LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Qwen3VLProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
video_grid_thw = videos_inputs["video_grid_thw"]
# If user has not requested video metadata, pop it
if not kwargs.get("return_metadata"):
video_metadata = videos_inputs.pop("video_metadata")
else:
video_metadata = videos_inputs["video_metadata"]
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
# num_image_tokens = image_grid_thw[index].prod() // merge_length
num_image_tokens = 40  # DEBUG: hardcoded override of the grid-based count commented out above
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
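# Illustrative example (not executed; the token strings are the Qwen-VL defaults
# and may differ per checkpoint): with the override above, a prompt such as
#   "<|vision_start|><|image_pad|><|vision_end|>What is this?"
# leaves this loop as
#   "<|vision_start|>" + "<|image_pad|>" * 40 + "<|vision_end|>What is this?"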
if video_grid_thw is not None:
merge_length = self.video_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
metadata = video_metadata[index]
if metadata.fps is None:
logger.warning_once(
"Qwen3VL requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
"Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
metadata.fps = 24 if metadata.fps is None else metadata.fps
# if timestamps are not provided, calculate them
curr_timestamp = self._calculate_timestamps(
metadata.frames_indices,
metadata.fps,
self.video_processor.temporal_patch_size,
)
video_placeholder = ""
frame_seqlen = video_grid_thw[index][1:].prod() // merge_length
for frame_idx in range(video_grid_thw[index][0]):
curr_time = curr_timestamp[frame_idx]
video_placeholder += f"<{curr_time:.1f} seconds>"
video_placeholder += (
self.vision_start_token + "<|placeholder|>" * frame_seqlen + self.vision_end_token
)
if f"{self.vision_start_token}{self.video_token}{self.vision_end_token}" in text[i]:
text[i] = text[i].replace(
f"{self.vision_start_token}{self.video_token}{self.vision_end_token}", video_placeholder, 1
)
else:
# vLLM may pass the bare video token without the vision start/end wrappers
text[i] = text[i].replace(self.video_token, video_placeholder, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.video_token)
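# Illustrative example (not executed; the grid values and token strings are
# assumptions): with video_grid_thw[index] = (2, 4, 4), merge_size = 2, and
# timestamps of 0.0s / 1.0s, each video token in the prompt expands to roughly
#   "<0.0 seconds><|vision_start|>" + "<|video_pad|>" * 4 + "<|vision_end|>"
#   "<1.0 seconds><|vision_start|>" + "<|video_pad|>" * 4 + "<|vision_end|>"
# since frame_seqlen = (4 * 4) // 2**2 = 4 video tokens per frame.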
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
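# --- Hedged usage sketch (illustrative only, not part of the patch). The
# checkpoint name is a placeholder and the prompt's special-token strings are
# assumptions; requires Pillow. ---
def _demo_processor_usage(checkpoint: str = "Qwen/Qwen3-VL-4B-Instruct"):
from PIL import Image
proc = myQwen3VLProcessor.from_pretrained(checkpoint)
image = Image.new("RGB", (448, 448))
prompt = "<|vision_start|><|image_pad|><|vision_end|>Describe the image."
batch = proc(images=[image], text=[prompt], return_tensors="pt")
# with the hardcoded override above, the image placeholder expands to
# exactly 40 image tokens regardless of image_grid_thw
return batch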
def _sample_indices_uniform(idx: torch.LongTensor, keep_ratio: float, min_keep: int = 0):
"""
idx: 1D indices in original sequence (sorted)
@@ -250,6 +137,14 @@ def patch_forward(
The temporal, height and width of feature shape of each video in LLM.
"""
def _sync():
# synchronize only on CUDA; calling torch.cuda.synchronize() in CPU mode would error
if torch.cuda.is_available() and inputs_embeds is not None and inputs_embeds.is_cuda:
torch.cuda.synchronize()
def _ms(t0):
# elapsed wall-clock time since t0, in milliseconds
return (time.perf_counter() - t0) * 1000.0
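# Note: CUDA kernels launch asynchronously, so without the synchronize() calls
# the perf_counter deltas below would largely measure kernel launch time rather
# than actual execution time.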
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
@@ -260,9 +155,17 @@ def patch_forward(
video_mask = None
if pixel_values is not None:
_sync()
t_img = time.perf_counter()
image_outputs: BaseModelOutputWithDeepstackFeatures = self.get_image_features(
pixel_values, image_grid_thw, return_dict=True
)
_sync()
print(f"[VLPATCH_DEBUG] get_image_features: {_ms(t_img):.3f} ms")
image_embeds = image_outputs.pooler_output  # per-image embedding chunks, concatenated below
deepstack_image_embeds = image_outputs.deepstack_features
image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
@@ -324,6 +227,15 @@ def patch_forward(
min_keep = kwargs.pop("min_keep_per_vis", 0)  # minimum visual tokens to keep per segment (e.g. 16)
max_len = kwargs.pop("truncate_max_len", None)  # optional cap on total sequence length
# stats before pruning
L0 = inputs_embeds.shape[1]
nvis0 = int(visual_pos_masks.sum().item()) if visual_pos_masks is not None else -1
eff0 = int(attention_mask.sum().item()) if attention_mask is not None else -1
print(f"[VLPATCH_DEBUG] BEFORE prune: L={L0}, visual={nvis0}, eff={eff0}")
_sync()
t_prune = time.perf_counter()
inputs_embeds, attention_mask, position_ids, visual_pos_masks, deepstack_visual_embeds = sparse_keep_and_gather(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
@@ -335,6 +247,18 @@ def patch_forward(
max_len=max_len,
)
_sync()
print(f"[VLPATCH_DEBUG] sparse_keep_and_gather: {_ms(t_prune):.3f} ms")
L1 = inputs_embeds.shape[1]
nvis1 = int(visual_pos_masks.sum().item()) if visual_pos_masks is not None else -1
eff1 = int(attention_mask.sum().item()) if attention_mask is not None else -1
print(f"[VLPATCH_DEBUG] AFTER prune: L={L1}, visual={nvis1}, eff={eff1}")
if L0 > 0 and nvis0 >= 0:
# Δ is the absolute change; the percentage is the retained fraction
print(f"[VLPATCH_DEBUG] ΔL={L1-L0} ({(L1/L0*100):.1f}%), "
f"Δvisual={nvis1-nvis0} ({(nvis1/max(nvis0,1)*100):.1f}%)")
# rebuild cache_position as 0..L-1 to avoid cache/position misalignment after pruning
cache_position = torch.arange(
inputs_embeds.shape[1], device=inputs_embeds.device, dtype=torch.long
@@ -346,6 +270,9 @@ def patch_forward(
self.rope_deltas = (max_pos + 1 - eff_len).unsqueeze(1)  # rotary offset so later decoding steps stay contiguous
# ====== end of pruning ======
_sync()
t_lm = time.perf_counter()
outputs = self.language_model(
input_ids=None,
position_ids=position_ids,
@@ -358,6 +285,9 @@ def patch_forward(
**kwargs,
)
_sync()
print(f"[VLPATCH_DEBUG] language_model: {_ms(t_lm):.3f} ms")
return Qwen3VLModelOutputWithPast(
**outputs,
rope_deltas=self.rope_deltas,
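# --- Hedged note (assumption; the wiring is outside this hunk): patch_forward is
# presumably bound onto the model via monkey-patching, e.g. something like
#   Qwen3VLModel.forward = patch_forward
# with Qwen3VLModel being the transformers Qwen3-VL base model class. ---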