Dataset Card for Sign Bible Dataset

This dataset contains Indian Sign Language (ISL) videos of Bible recordings, processed for machine learning applications. The dataset is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License (CC BY-SA 4.0).

Dataset Details

Contains 1,111 sign language videos, totalling 19.5 hours, distributed as WebDataset tar shards.

Uses

  • Sign videos and corresponding pose estimation data for pose-based applications
  • Parallel sign language and text data for translation purposes (a minimal pairing sketch follows the example script below)

How to use
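
The example script below streams samples directly from the Hub with webdataset. Besides webdataset and torch, it uses opencv-python, matplotlib and pose-format for the video and pose processing steps.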

import webdataset as wds
from torch.utils.data import DataLoader
import numpy as np
import json
import tempfile
import os
import cv2


def main():
    buffer_size = 1024
    dataset = (
        wds.WebDataset("https://huggingface.co/datasets/bridgeconn/sign-dictionary-isl/resolve/main/chunk_{00001..00002}.tar", shardshuffle=False)
        .shuffle(buffer_size)
        .decode()
    )
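
    # Optionally wrap the pipeline in the imported DataLoader for multi-worker
    # streaming (a sketch: batch_size=None avoids default collation, since the
    # raw video/pose payloads have variable lengths).
    # loader = DataLoader(dataset, batch_size=None, num_workers=2)
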
    for sample in dataset:
        ''' Each sample contains the keys:
             'mp4', 'pose-animation.mp4',
             'pose-dwpose.npz', 'pose-mediapipe.pose'
             and 'json'
        '''
        # print(sample.keys())

        # JSON metadata
        json_data = sample['json']
        print(json_data['filename']) 
        print(json_data['bible-ref']) 
        print(json_data['biblenlp-vref']) 
        print(json_data['signer'])
        print(json_data['transcripts']) 

        # main video
        mp4_data = sample['mp4']
        process_video(mp4_data)
        
        # pose video
        pose_data = sample['pose-animation.mp4']
        process_video(pose_data)

        # dwpose results
        dwpose_coords = sample["pose-dwpose.npz"]
        frame_poses = dwpose_coords['frames'].tolist()
        print(f"Frames in dwpose coords: {len(frame_poses)} poses")
        print(f"Pose coords shape: {len(frame_poses[0][0])}")
        print(f"One point looks like [x,y]: {frame_poses[0][0][0]}")

        # mediapipe results in .pose format
        pose_format_data = sample["pose-mediapipe.pose"]
        process_poseformat(pose_format_data)

        break


def process_poseformat(pose_format_data):
    from pose_format import Pose
    temp_file = None
    try:
        with tempfile.NamedTemporaryFile(suffix=".pose", delete=False) as tmp:
            tmp.write(pose_format_data)
            temp_file = tmp.name

        with open(temp_file, "rb") as f:
            data_buffer = f.read()
        pose = Pose.read(data_buffer)

        print(f"Mediapipe results from pose-format: {pose.body.data.shape}")
    except Exception as e:
        print(f"Error processing pose-format: {e}")
    finally:
        if temp_file and os.path.exists(temp_file):
            os.remove(temp_file) # Clean up the temporary file


def process_video(mp4_data):
    print(f"Video bytes length: {len(mp4_data)} bytes")

    temp_file = None
    try:
        # Processing video from temporary file
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            tmp.write(mp4_data)
            temp_file = tmp.name

        cap = cv2.VideoCapture(temp_file)

        if not cap.isOpened():
            raise IOError(f"Could not open video file: {temp_file}")

        # Example: Get video metadata
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        print(f"Video Info: {frame_count} frames, {fps:.2f} FPS, {width}x{height}")

        # Example: Read and display the first frame (or process as needed)
        ret, frame = cap.read()
        if ret:
            print(f"First frame shape: {frame.shape}, dtype: {frame.dtype}")
            # You can then use this frame for further processing, e.g.,
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            import matplotlib.pyplot as plt
            plt.imshow(frame_rgb)
            plt.title("Sample First Frame")
            plt.show()
        else:
            print("Could not read first frame.")

        cap.release()

    except Exception as e:
        print(f"Error processing external MP4: {e}")
    finally:
        if temp_file and os.path.exists(temp_file):
            os.remove(temp_file) # Clean up the temporary file


if __name__ == '__main__':
    main()
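
To build the parallel sign language and text pairs mentioned under "Uses", it is enough to keep the 'pose-dwpose.npz' and 'json' fields of each sample. The generator below is a minimal sketch that assumes the field names shown in the script above; swap the keys (for example to 'pose-mediapipe.pose') for whatever pose representation you need.

import webdataset as wds


def sign_text_pairs(url):
    """Yield (pose_frames, transcripts) pairs for translation-style training."""
    dataset = wds.WebDataset(url, shardshuffle=False).decode()
    for sample in dataset:
        transcripts = sample["json"]["transcripts"]         # parallel text
        pose_frames = sample["pose-dwpose.npz"]["frames"]   # per-frame keypoints
        yield pose_frames, transcripts


if __name__ == "__main__":
    shards = "https://huggingface.co/datasets/bridgeconn/sign-dictionary-isl/resolve/main/chunk_{00001..00002}.tar"
    for poses, text in sign_text_pairs(shards):
        print(len(poses), text)
        break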