Upload llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256
    	
        llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/bench.slurm
    ADDED
    
@@ -0,0 +1,111 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=2
#SBATCH --gres=gpu:8
#SBATCH --qos=normal
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doesn't update the status for pending jobs; it only works for running ones.
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/config.yaml"

LAUNCHER="torchrun \
   --nproc_per_node 8 \
   --nnodes 2 \
   --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
   --rdzv_backend c10d \
   --max_restarts 0 \
   --tee 3 \
   --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256 --is_profiler
fi


# Push the folder to the Hub using huggingface-cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/bench_cluster/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256 llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256 --commit-message "Upload llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
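
Note on the launch topology: the torchrun launcher above starts 8 processes on each of the 2 allocated nodes, and that 16-rank world size has to match the dp-4_tp-2_pp-2 layout declared in config.yaml below (4 x 2 x 2 = 16). A minimal bash sketch of that sanity check follows; the variable names are illustrative and the check is not part of the uploaded script:

#!/bin/bash
# Hypothetical pre-submit check (not in bench.slurm): confirm that the
# torchrun world size exactly covers the parallelism grid.
NNODES=2            # matches #SBATCH --nodes=2
NPROC_PER_NODE=8    # matches --nproc_per_node 8 (one rank per GPU)
DP=4; TP=2; PP=2    # matches parallelism.dp/tp/pp in config.yaml

WORLD_SIZE=$((NNODES * NPROC_PER_NODE))
GRID_SIZE=$((DP * TP * PP))

if [ "$WORLD_SIZE" -ne "$GRID_SIZE" ]; then
    echo "Mismatch: world size $WORLD_SIZE != dp*tp*pp = $GRID_SIZE" >&2
    exit 1
fi
echo "OK: $WORLD_SIZE ranks cover dp=$DP x tp=$TP x pp=$PP"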
    	
        llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/config.yaml
    ADDED
    
@@ -0,0 +1,90 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 4
  expert_parallel_size: 1
  pp: 2
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/tmp/remove/llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 256
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
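
For reference, the effective batch implied by the tokens section above is micro_batch_size 256 x batch_accumulation_per_replica 1 x dp 4 = 1024 sequences per optimizer step, i.e. 1024 x 4096 = 4,194,304 tokens per step. A small, hypothetical bash sketch of that arithmetic (not part of the upload):

#!/bin/bash
# Illustrative only: derive the global batch size implied by config.yaml.
MICRO_BATCH_SIZE=256   # tokens.micro_batch_size
GRAD_ACCUM=1           # tokens.batch_accumulation_per_replica
DP=4                   # parallelism.dp
SEQ_LEN=4096           # tokens.sequence_length

GLOBAL_BATCH=$((MICRO_BATCH_SIZE * GRAD_ACCUM * DP))   # sequences per step
TOKENS_PER_STEP=$((GLOBAL_BATCH * SEQ_LEN))            # tokens per step

echo "global batch: $GLOBAL_BATCH sequences/step"
echo "tokens per step: $TOKENS_PER_STEP"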
    	
        llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/log.out
    ADDED
    
The diff for this file is too large to render.
    	
        llama-1B/16_GPUS/dp-4_tp-2_pp-2_mbz-256/status.txt
    ADDED
    
@@ -0,0 +1 @@
oom
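
This marker is written by the error-classification branch in bench.slurm above: the run exited with a non-zero status and log.out contained "OutOfMemoryError" (or an illegal-memory-access CUDA error), so the benchmark is recorded as out of memory, which is plausible for a micro-batch of 256 sequences at sequence length 4096 on this 16-GPU layout.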