Results on MMEB(V1/V2)?

#43
by TMRBMWK - opened

Can you share the jina-embeddings-v4 evaluation results on MMEB (Massive Multimodal Embedding Benchmark)?

ref1: https://huggingface.co/spaces/TIGER-Lab/MMEB-Leaderboard
ref2: https://github.com/TIGER-AI-Lab/VLM2Vec

My estimated evaluation results in single_vector mode come out close to colpali_1.3, presumably because for document images the model pools only the masked image-token features, without a combined text+image feature representation.

    def get_single_vector_embeddings(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        input_ids: Optional[torch.LongTensor] = None,
    ) -> torch.Tensor:
        """
        Get the single-vector embeddings from the hidden states.
        """
        if self._input_has_image(input_ids[0]):  # got a document image
            # Locate the image-token span via the vision start/end marker
            # tokens (assumes input_ids is provided and one image per sequence).
            img_start_positions = torch.where(
                input_ids == self.config.vision_start_token_id
            )[1]
            img_end_positions = torch.where(
                input_ids == self.config.vision_end_token_id
            )[1]

            batch_size, seq_len = input_ids.shape
            position_indices = torch.arange(seq_len, device=input_ids.device).expand(
                batch_size, -1
            )
            # Boolean mask that is True only inside the image-token span.
            image_mask = (position_indices >= img_start_positions.unsqueeze(1)) & (
                position_indices <= img_end_positions.unsqueeze(1)
            )

            # Mean-pool over image tokens only; all other tokens are zeroed out.
            masked_hidden_states = hidden_states * image_mask.unsqueeze(-1)
            pooled_output = masked_hidden_states.sum(dim=1) / image_mask.sum(
                dim=1, keepdim=True
            )

        else:  # got query text
            # Standard attention-masked mean pooling over all tokens.
            pooled_output = torch.sum(
                hidden_states * attention_mask.unsqueeze(-1), dim=1
            ) / torch.sum(attention_mask, dim=1, keepdim=True)

        return torch.nn.functional.normalize(pooled_output, dim=-1)
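
To make the image branch concrete, here is a minimal standalone sketch of the same masked mean pooling. The token IDs and tensor sizes are hypothetical, chosen only for illustration:

import torch

# Hypothetical marker IDs standing in for config.vision_start_token_id /
# config.vision_end_token_id; the real values come from the model config.
VISION_START, VISION_END = 1000, 1001

input_ids = torch.tensor([[7, VISION_START, 11, 12, 13, VISION_END, 8]])
hidden_states = torch.randn(1, 7, 16)  # (batch, seq_len, hidden_dim)

start = torch.where(input_ids == VISION_START)[1]
end = torch.where(input_ids == VISION_END)[1]
positions = torch.arange(input_ids.shape[1]).expand(input_ids.shape[0], -1)
image_mask = (positions >= start.unsqueeze(1)) & (positions <= end.unsqueeze(1))

# Only the five positions from VISION_START through VISION_END contribute;
# the surrounding text tokens are excluded from the mean.
pooled = (hidden_states * image_mask.unsqueeze(-1)).sum(dim=1) / image_mask.sum(
    dim=1, keepdim=True
)
print(torch.nn.functional.normalize(pooled, dim=-1).shape)  # torch.Size([1, 16])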

    def get_multi_vector_embeddings(
        self,
        task_label: Union[str, List[str]],
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
    ) -> torch.Tensor:
        """
        Project the hidden states to multi-vector embeddings.
        """
        # One projected, L2-normalized vector per token; padding positions
        # are zeroed out by the attention mask.
        multi_vec_emb = self.multi_vector_projector(
            hidden_states, task_label=task_label
        )
        multi_vec_emb = torch.nn.functional.normalize(multi_vec_emb, dim=-1)
        return multi_vec_emb * attention_mask.unsqueeze(-1)
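
Since the multi-vector path returns one normalized vector per token (ColPali-style), retrieval with these embeddings would use late-interaction scoring. A minimal MaxSim sketch, assuming q_emb and d_emb are single query/document outputs of the method above (the masked padding rows are all-zero, so they contribute roughly nothing to the score):

import torch

def maxsim_score(q_emb: torch.Tensor, d_emb: torch.Tensor) -> torch.Tensor:
    """
    ColBERT/ColPali-style late-interaction score.
    q_emb: (num_query_tokens, dim), d_emb: (num_doc_tokens, dim),
    each row L2-normalized (zeroed padding rows contribute ~0).
    """
    sim = q_emb @ d_emb.T               # (num_query_tokens, num_doc_tokens)
    return sim.max(dim=1).values.sum()  # best doc token per query token, summed

# Toy usage with random normalized token vectors.
q = torch.nn.functional.normalize(torch.randn(12, 128), dim=-1)
d = torch.nn.functional.normalize(torch.randn(700, 128), dim=-1)
print(maxsim_score(q, d))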
