• shell


    Reference:

    Syntax excerpt

    num_threads=${NUM_THREADS:-64}
    echo ${num_threads}
    
    num_threads=${NUM_THREADS:-64}: this line uses shell parameter expansion with a default value. ${NUM_THREADS} refers to the environment variable named NUM_THREADS. If NUM_THREADS is already set (and non-empty), num_threads is assigned its value; if NUM_THREADS is unset (or empty), num_threads falls back to the default value 64.
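
    For comparison, here is a minimal sketch of the related expansion forms (standard bash behavior; the values are only illustrative):

    #!/usr/bin/env bash
    unset NUM_THREADS
    echo "${NUM_THREADS:-64}"    # 64: unset, so the default is substituted
    NUM_THREADS=""
    echo "${NUM_THREADS:-64}"    # 64: ":-" treats empty the same as unset
    echo "${NUM_THREADS-64}"     # (empty): "-" without the colon checks only unset
    NUM_THREADS=8
    echo "${NUM_THREADS:-64}"    # 8: the variable is set, the default is ignored
    : "${NUM_THREADS:=64}"       # ":=" would also assign the default; a no-op here
    echo "$NUM_THREADS"          # 8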

    Some source code (RocksDB's benchmark.sh):

    #!/usr/bin/env bash
    # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
    # REQUIRE: db_bench binary exists in the current directory
    
    # Exit Codes
    EXIT_INVALID_ARGS=1
    EXIT_NOT_COMPACTION_TEST=2
    EXIT_UNKNOWN_JOB=3
    
    DB_DIR=/data/rocksdb
    WAL_DIR=/data/rocksdb_wal
    
    # Size Constants
    K=1024
    M=$((1024 * K))
    G=$((1024 * M))
    T=$((1024 * G))
    
    function display_usage() {
      echo "usage: benchmark.sh [--help] "
      echo ""
      echo "These are the available benchmark tests:"
      echo -e "\tbulkload"
      echo -e "\tfillseq_disable_wal\t\tSequentially fill the database with no WAL"
      echo -e "\tfillseq_enable_wal\t\tSequentially fill the database with WAL"
      echo -e "\toverwrite"
      echo -e "\tupdaterandom"
      echo -e "\treadrandom"
      echo -e "\tmergerandom"
      echo -e "\tfilluniquerandom"
      echo -e "\tmultireadrandom"
      echo -e "\tfwdrange"
      echo -e "\trevrange"
      echo -e "\treadwhilewriting"
      echo -e "\treadwhilemerging"
      echo -e "\tfwdrangewhilewriting"
      echo -e "\trevrangewhilewriting"
      echo -e "\tfwdrangewhilemerging"
      echo -e "\trevrangewhilemerging"
      echo -e "\trandomtransaction"
      echo -e "\tuniversal_compaction"
      echo -e "\tdebug"
      echo ""
      echo "Generic enviroment Variables:"
      echo -e "\tJOB_ID\t\t\t\tAn identifier for the benchmark job, will appear in the results"
      echo -e "\tDB_DIR\t\t\t\tPath to write the database data directory"
      echo -e "\tWAL_DIR\t\t\t\tPath to write the database WAL directory"
      echo -e "\tOUTPUT_DIR\t\t\tPath to write the benchmark results to (default: /tmp)"
      echo -e "\tNUM_KEYS\t\t\tThe number of keys to use in the benchmark"
      echo -e "\tKEY_SIZE\t\t\tThe size of the keys to use in the benchmark (default: 20 bytes)"
      echo -e "\tVALUE_SIZE\t\t\tThe size of the values to use in the benchmark (default: 400 bytes)"
      echo -e "\tBLOCK_SIZE\t\t\tThe size of the database blocks in the benchmark (default: 8 KB)"
      echo -e "\tDB_BENCH_NO_SYNC\t\tDisable fsync on the WAL"
      echo -e "\tNUMACTL\t\t\t\tWhen defined use numactl --interleave=all"
      echo -e "\tNUM_THREADS\t\t\tThe number of threads to use (default: 64)"
      echo -e "\tMB_WRITE_PER_SEC\t\t\tRate limit for background writer"
      echo -e "\tNUM_NEXTS_PER_SEEK\t\t(default: 10)"
      echo -e "\tCACHE_SIZE\t\t\tSize of the block cache (default: 16GB)"
      echo -e "\tCACHE_NUMSHARDBITS\t\t\tNumber of shards for the block cache is 2 ** cache_numshardbits (default: 6)"
      echo -e "\tCOMPRESSION_MAX_DICT_BYTES"
      echo -e "\tCOMPRESSION_TYPE\t\tDefault compression(default: zstd)"
      echo -e "\tBOTTOMMOST_COMPRESSION\t\t(default: none)"
      echo -e "\tMIN_LEVEL_TO_COMPRESS\t\tValue for min_level_to_compress for Leveled"
      echo -e "\tCOMPRESSION_SIZE_PERCENT\tValue for compression_size_percent for Universal"
      echo -e "\tDURATION\t\t\tNumber of seconds for which the test runs"
      echo -e "\tWRITES\t\t\t\tNumber of writes for which the test runs"
      echo -e "\tWRITE_BUFFER_SIZE_MB\t\tThe size of the write buffer in MB (default: 128)"
      echo -e "\tTARGET_FILE_SIZE_BASE_MB\tThe value for target_file_size_base in MB (default: 128)"
      echo -e "\tMAX_BYTES_FOR_LEVEL_BASE_MB\tThe value for max_bytes_for_level_base in MB (default: 128)"
      echo -e "\tMAX_BACKGROUND_JOBS\t\tThe value for max_background_jobs (default: 16)"
      echo -e "\tCACHE_INDEX_AND_FILTER_BLOCKS\tThe value for cache_index_and_filter_blocks (default: 0)"
      echo -e "\tUSE_O_DIRECT\t\t\tUse O_DIRECT for user reads and compaction"
      echo -e "\tBYTES_PER_SYNC\t\t\tValue for bytes_per_sync, set to zero when USE_O_DIRECT is true"
      echo -e "\tSTATS_INTERVAL_SECONDS\t\tValue for stats_interval_seconds"
      echo -e "\tREPORT_INTERVAL_SECONDS\t\tValue for report_interval_seconds"
      echo -e "\tSUBCOMPACTIONS\t\t\tValue for subcompactions"
      echo -e "\tCOMPACTION_STYLE\t\tOne of leveled, universal, blob. Default is leveled."
      echo -e "\nEnvironment variables (mostly) for leveled compaction:"
      echo -e "\tLEVEL0_FILE_NUM_COMPACTION_TRIGGER\t\tValue for level0_file_num_compaction_trigger"
      echo -e "\tLEVEL0_SLOWDOWN_WRITES_TRIGGER\t\t\tValue for level0_slowdown_writes_trigger"
      echo -e "\tLEVEL0_STOP_WRITES_TRIGGER\t\t\tValue for level0_stop_writes_trigger"
      echo -e "\tPER_LEVEL_FANOUT\t\t\t\tValue for max_bytes_for_level_multiplier"
      echo -e "\tSOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB\tThe value for soft_pending_compaction_bytes_limit in GB"
      echo -e "\tHARD_PENDING_COMPACTION_BYTES_LIMIT_IN_GB\tThe value for hard_pending_compaction_bytes_limit in GB"
      echo -e "\nEnvironment variables for universal compaction:"
      echo -e "\tUNIVERSAL_MIN_MERGE_WIDTH\tValue of min_merge_width option for universal"
      echo -e "\tUNIVERSAL_MAX_MERGE_WIDTH\tValue of min_merge_width option for universal"
      echo -e "\tUNIVERSAL_SIZE_RATIO\t\tValue of size_ratio option for universal"
      echo -e "\tUNIVERSAL_MAX_SIZE_AMP\t\tmax_size_amplification_percent for universal"
      echo -e "\tUNIVERSAL_ALLOW_TRIVIAL_MOVE\tSet allow_trivial_move to true for universal, default is false"
      echo -e "\nOptions for integrated BlobDB"
      echo -e "\tMIN_BLOB_SIZE\tValue for min_blob_size"
      echo -e "\tBLOB_FILE_SIZE\tValue for blob_file_size"
      echo -e "\tBLOB_COMPRESSION_TYPE\tValue for blob_compression_type"
      echo -e "\tBLOB_GC_AGE_CUTOFF\tValue for blob_garbage_collection_age_cutoff"
      echo -e "\tBLOB_GC_FORCE_THRESHOLD\tValue for blob_garbage_collection_force_threshold"
      echo -e "\tBLOB_FILE_STARTING_LEVEL\t\tBlob file starting level (default: 0)"
      echo -e "\tUSE_BLOB_CACHE\t\t\tEnable blob cache (default: 1)"
      echo -e "\tUSE_SHARED_BLOCK_AND_BLOB_CACHE\t\t\tUse the same backing cache for block cache and blob cache (default: 1)"
      echo -e "\tBLOB_CACHE_SIZE\t\t\tSize of the blob cache (default: 16GB)"
      echo -e "\tBLOB_CACHE_NUMSHARDBITS\t\t\tNumber of shards for the blob cache is 2 ** blob_cache_numshardbits (default: 6)"
      echo -e "\tPREPOPULATE_BLOB_CACHE\t\t\tPre-populate hot/warm blobs in blob cache (default: 0)"
    }
    
    if [ $# -lt 1 ]; then
      display_usage
      exit $EXIT_INVALID_ARGS
    fi
    bench_cmd=$1
    shift
    bench_args=$*
    
    if [[ "$bench_cmd" == "--help" ]]; then
      display_usage
      exit
    fi
    
    job_id=${JOB_ID}
    
    # Make it easier to run only the compaction test. Getting valid data requires
    # a number of iterations, and being able to run the test separately from the
    # rest of the benchmarks helps.
    if [ "$COMPACTION_TEST" == "1" -a "$bench_cmd" != "universal_compaction" ]; then
      echo "Skipping $1 because it's not a compaction test."
      exit $EXIT_NOT_COMPACTION_TEST
    fi
    
    if [ -z $DB_DIR ]; then
      echo "DB_DIR is not defined"
      exit $EXIT_INVALID_ARGS
    fi
    
    if [ -z $WAL_DIR ]; then
      echo "WAL_DIR is not defined"
      exit $EXIT_INVALID_ARGS
    fi
    
    output_dir=${OUTPUT_DIR:-/tmp}
    if [ ! -d $output_dir ]; then
      mkdir -p $output_dir
    fi
    
    report="$output_dir/report.tsv"
    schedule="$output_dir/schedule.txt"
    
    # all multithreaded tests run with sync=1 unless
    # $DB_BENCH_NO_SYNC is defined
    syncval="1"
    if [ ! -z $DB_BENCH_NO_SYNC ]; then
      echo "Turning sync off for all multithreaded tests"
      syncval="0";
    fi
    
    compaction_style=${COMPACTION_STYLE:-leveled}
    if [ $compaction_style = "leveled" ]; then
      echo Use leveled compaction
    elif [ $compaction_style = "universal" ]; then
      echo Use universal compaction
    elif [ $compaction_style = "blob" ]; then
      echo Use blob compaction
    else
      echo COMPACTION_STYLE is :: $COMPACTION_STYLE :: and must be one of leveled, universal, blob
      exit $EXIT_INVALID_ARGS
    fi
    
    num_threads=${NUM_THREADS:-64}
    mb_written_per_sec=${MB_WRITE_PER_SEC:-0}
    # Only for tests that do range scans
    num_nexts_per_seek=${NUM_NEXTS_PER_SEEK:-10}
    cache_size=${CACHE_SIZE:-$(( 16 * $G ))}
    cache_numshardbits=${CACHE_NUMSHARDBITS:-6}
    compression_max_dict_bytes=${COMPRESSION_MAX_DICT_BYTES:-0}
    compression_type=${COMPRESSION_TYPE:-zstd}
    min_level_to_compress=${MIN_LEVEL_TO_COMPRESS:-"-1"}
    compression_size_percent=${COMPRESSION_SIZE_PERCENT:-"-1"}
    
    duration=${DURATION:-0}
    writes=${WRITES:-0}
    
    num_keys=${NUM_KEYS:-8000000000}
    key_size=${KEY_SIZE:-20}
    value_size=${VALUE_SIZE:-400}
    block_size=${BLOCK_SIZE:-8192}
    write_buffer_mb=${WRITE_BUFFER_SIZE_MB:-128}
    target_file_mb=${TARGET_FILE_SIZE_BASE_MB:-128}
    l1_mb=${MAX_BYTES_FOR_LEVEL_BASE_MB:-1024}
    max_background_jobs=${MAX_BACKGROUND_JOBS:-16}
    stats_interval_seconds=${STATS_INTERVAL_SECONDS:-60}
    report_interval_seconds=${REPORT_INTERVAL_SECONDS:-1}
    subcompactions=${SUBCOMPACTIONS:-1}
    per_level_fanout=${PER_LEVEL_FANOUT:-8}
    
    cache_index_and_filter=${CACHE_INDEX_AND_FILTER_BLOCKS:-0}
    if [[ $cache_index_and_filter -eq 0 ]]; then
      cache_meta_flags=""
    elif [[ $cache_index_and_filter -eq 1 ]]; then
      cache_meta_flags="\
      --cache_index_and_filter_blocks=$cache_index_and_filter \
      --cache_high_pri_pool_ratio=0.5 --cache_low_pri_pool_ratio=0"
    else
      echo CACHE_INDEX_AND_FILTER_BLOCKS was $CACHE_INDEX_AND_FILTER_BLOCKS but must be 0 or 1
      exit $EXIT_INVALID_ARGS
    fi
    
    soft_pending_arg=""
    if [ ! -z $SOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB ]; then
      soft_pending_bytes=$( echo $SOFT_PENDING_COMPACTION_BYTES_LIMIT_IN_GB | \
        awk '{ printf "%.0f", $1 * GB }' GB=$G )
      soft_pending_arg="--soft_pending_compaction_bytes_limit=$soft_pending_bytes"
    fi
    
    hard_pending_arg=""
    if [ ! -z $HARD_PENDING_COMPACTION_BYTES_LIMIT_IN_GB ]; then
      hard_pending_bytes=$( echo $HARD_PENDING_COMPACTION_BYTES_LIMIT_IN_GB | \
        awk '{ printf "%.0f", $1 * GB }' GB=$G )
      hard_pending_arg="--hard_pending_compaction_bytes_limit=$hard_pending_bytes"
    fi
    
    o_direct_flags=""
    if [ ! -z $USE_O_DIRECT ]; then
      # Some of these flags are only supported in new versions and --undefok makes that work
      o_direct_flags="--use_direct_reads --use_direct_io_for_flush_and_compaction --prepopulate_block_cache=1"
      bytes_per_sync=0
    else
      bytes_per_sync=${BYTES_PER_SYNC:-$(( 1 * M ))}
    fi
    
    univ_min_merge_width=${UNIVERSAL_MIN_MERGE_WIDTH:-2}
    univ_max_merge_width=${UNIVERSAL_MAX_MERGE_WIDTH:-20}
    univ_size_ratio=${UNIVERSAL_SIZE_RATIO:-1}
    univ_max_size_amp=${UNIVERSAL_MAX_SIZE_AMP:-200}
    
    if [ ! -z $UNIVERSAL_ALLOW_TRIVIAL_MOVE ]; then
      univ_allow_trivial_move=1
    else
      univ_allow_trivial_move=0
    fi
    
    min_blob_size=${MIN_BLOB_SIZE:-0}
    blob_file_size=${BLOB_FILE_SIZE:-$(( 256 * $M ))}
    blob_compression_type=${BLOB_COMPRESSION_TYPE:-${compression_type}}
    blob_gc_age_cutoff=${BLOB_GC_AGE_CUTOFF:-"0.25"}
    blob_gc_force_threshold=${BLOB_GC_FORCE_THRESHOLD:-1}
    blob_file_starting_level=${BLOB_FILE_STARTING_LEVEL:-0}
    use_blob_cache=${USE_BLOB_CACHE:-1}
    use_shared_block_and_blob_cache=${USE_SHARED_BLOCK_AND_BLOB_CACHE:-1}
    blob_cache_size=${BLOB_CACHE_SIZE:-$(( 16 * $G ))}
    blob_cache_numshardbits=${BLOB_CACHE_NUMSHARDBITS:-6}
    prepopulate_blob_cache=${PREPOPULATE_BLOB_CACHE:-0}
    
    # This script still works back to RocksDB 6.0
    undef_params="\
    use_blob_cache,\
    use_shared_block_and_blob_cache,\
    blob_cache_size,blob_cache_numshardbits,\
    prepopulate_blob_cache,\
    multiread_batched,\
    cache_low_pri_pool_ratio,\
    prepopulate_block_cache"
    
    const_params_base="
      --undefok=$undef_params \
      --db=$DB_DIR \
      --wal_dir=$WAL_DIR \
      \
      --num=$num_keys \
      --key_size=$key_size \
      --value_size=$value_size \
      --block_size=$block_size \
      --cache_size=$cache_size \
      --cache_numshardbits=$cache_numshardbits \
      --compression_max_dict_bytes=$compression_max_dict_bytes \
      --compression_ratio=0.5 \
      --compression_type=$compression_type \
      --bytes_per_sync=$bytes_per_sync \
      $cache_meta_flags \
      $o_direct_flags \
      --benchmark_write_rate_limit=$(( 1024 * 1024 * $mb_written_per_sec )) \
      \
      --write_buffer_size=$(( $write_buffer_mb * M)) \
      --target_file_size_base=$(( $target_file_mb * M)) \
      --max_bytes_for_level_base=$(( $l1_mb * M)) \
      \
      --verify_checksum=1 \
      --delete_obsolete_files_period_micros=$((60 * M)) \
      --max_bytes_for_level_multiplier=$per_level_fanout \
      \
      --statistics=0 \
      --stats_per_interval=1 \
      --stats_interval_seconds=$stats_interval_seconds \
      --report_interval_seconds=$report_interval_seconds \
      --histogram=1 \
      \
      --memtablerep=skip_list \
      --bloom_bits=10 \
      --open_files=-1 \
      --subcompactions=$subcompactions \
      \
      $bench_args"
    
    level_const_params="
      $const_params_base \
      --compaction_style=0 \
      --num_levels=8 \
      --min_level_to_compress=$min_level_to_compress \
      --level_compaction_dynamic_level_bytes=true \
      --pin_l0_filter_and_index_blocks_in_cache=1 \
      $soft_pending_arg \
      $hard_pending_arg \
    "
    
    # These inherit level_const_params because the non-blob LSM tree uses leveled compaction.
    blob_const_params="
      $level_const_params \
      --enable_blob_files=true \
      --min_blob_size=$min_blob_size \
      --blob_file_size=$blob_file_size \
      --blob_compression_type=$blob_compression_type \
      --enable_blob_garbage_collection=true \
      --blob_garbage_collection_age_cutoff=$blob_gc_age_cutoff \
      --blob_garbage_collection_force_threshold=$blob_gc_force_threshold \
      --blob_file_starting_level=$blob_file_starting_level \
      --use_blob_cache=$use_blob_cache \
      --use_shared_block_and_blob_cache=$use_shared_block_and_blob_cache \
      --blob_cache_size=$blob_cache_size \
      --blob_cache_numshardbits=$blob_cache_numshardbits \
      --prepopulate_blob_cache=$prepopulate_blob_cache \
    "
    
    # TODO:
    #   pin_l0_filter_and..., is this OK?
    univ_const_params="
      $const_params_base \
      --compaction_style=1 \
      --num_levels=40 \
      --universal_compression_size_percent=$compression_size_percent \
      --pin_l0_filter_and_index_blocks_in_cache=1 \
      --universal_min_merge_width=$univ_min_merge_width \
      --universal_max_merge_width=$univ_max_merge_width \
      --universal_size_ratio=$univ_size_ratio \
      --universal_max_size_amplification_percent=$univ_max_size_amp \
      --universal_allow_trivial_move=$univ_allow_trivial_move \
    "
    
    if [ $compaction_style == "leveled" ]; then
      const_params="$level_const_params"
      l0_file_num_compaction_trigger=${LEVEL0_FILE_NUM_COMPACTION_TRIGGER:-4}
      l0_slowdown_writes_trigger=${LEVEL0_SLOWDOWN_WRITES_TRIGGER:-20}
      l0_stop_writes_trigger=${LEVEL0_STOP_WRITES_TRIGGER:-30}
    elif [ $compaction_style == "universal" ]; then
      const_params="$univ_const_params"
      l0_file_num_compaction_trigger=${LEVEL0_FILE_NUM_COMPACTION_TRIGGER:-8}
      l0_slowdown_writes_trigger=${LEVEL0_SLOWDOWN_WRITES_TRIGGER:-20}
      l0_stop_writes_trigger=${LEVEL0_STOP_WRITES_TRIGGER:-30}
    else
      # compaction_style == "blob"
      const_params="$blob_const_params"
      l0_file_num_compaction_trigger=${LEVEL0_FILE_NUM_COMPACTION_TRIGGER:-4}
      l0_slowdown_writes_trigger=${LEVEL0_SLOWDOWN_WRITES_TRIGGER:-20}
      l0_stop_writes_trigger=${LEVEL0_STOP_WRITES_TRIGGER:-30}
    fi
    
    l0_config="
      --level0_file_num_compaction_trigger=$l0_file_num_compaction_trigger \
      --level0_slowdown_writes_trigger=$l0_slowdown_writes_trigger \
      --level0_stop_writes_trigger=$l0_stop_writes_trigger"
    
    # You probably don't want to set both --writes and --duration
    if [ $duration -gt 0 ]; then
      const_params="$const_params --duration=$duration"
    fi
    if [ $writes -gt 0 ]; then
      const_params="$const_params --writes=$writes"
    fi
    
    params_w="$l0_config \
              --max_background_jobs=$max_background_jobs \
              --max_write_buffer_number=8 \
              $const_params"
    
    params_bulkload="--max_background_jobs=$max_background_jobs \
                     --max_write_buffer_number=8 \
                     --allow_concurrent_memtable_write=false \
                     --level0_file_num_compaction_trigger=$((10 * M)) \
                     --level0_slowdown_writes_trigger=$((10 * M)) \
                     --level0_stop_writes_trigger=$((10 * M)) \
                     $const_params "
    
    params_fillseq="--allow_concurrent_memtable_write=false \
                    $params_w "
    
    #
    # Tuned values for leveled and universal compaction.
    # For universal compaction, these level0_* options mean the total number of
    # sorted runs in the LSM tree. For level-based compaction, they mean the
    # number of L0 files.
    #
    params_level_compact="$const_params \
                    --max_background_flushes=4 \
                    --max_write_buffer_number=4 \
                    --level0_file_num_compaction_trigger=4 \
                    --level0_slowdown_writes_trigger=16 \
                    --level0_stop_writes_trigger=20"
    
    params_univ_compact="$const_params \
                    --max_background_flushes=4 \
                    --max_write_buffer_number=4 \
                    --level0_file_num_compaction_trigger=8 \
                    --level0_slowdown_writes_trigger=16 \
                    --level0_stop_writes_trigger=20"
    
    tsv_header="ops_sec\tmb_sec\tlsm_sz\tblob_sz\tc_wgb\tw_amp\tc_mbps\tc_wsecs\tc_csecs\tb_rgb\tb_wgb\tusec_op\tp50\tp99\tp99.9\tp99.99\tpmax\tuptime\tstall%\tNstall\tu_cpu\ts_cpu\trss\ttest\tdate\tversion\tjob_id\tgithash"
    
    function get_cmd() {
      output=$1
    
      numa=""
      if [ ! -z $NUMACTL ]; then
        numa="numactl --interleave=all "
      fi
    
      # Try to use timeout when duration is set because some tests (revrange*) hang
      # for some versions (v6.10, v6.11).
      timeout_cmd=""
      if [ $duration -gt 0 ]; then
        if hash timeout ; then
          timeout_cmd="timeout $(( $duration + 600 ))"
        fi
      fi
    
      echo "/usr/bin/time -f '%e %U %S' -o $output $numa $timeout_cmd"
    }
    
    function month_to_num() {
        local date_str=$1
        date_str="${date_str/Jan/01}"
        date_str="${date_str/Feb/02}"
        date_str="${date_str/Mar/03}"
        date_str="${date_str/Apr/04}"
        date_str="${date_str/May/05}"
        date_str="${date_str/Jun/06}"
        date_str="${date_str/Jul/07}"
        date_str="${date_str/Aug/08}"
        date_str="${date_str/Sep/09}"
        date_str="${date_str/Oct/10}"
        date_str="${date_str/Nov/11}"
        date_str="${date_str/Dec/12}"
        echo $date_str
    }
    
    function start_stats {
      output=$1
      iostat -y -mx 1  >& $output.io &
      vmstat 1 >& $output.vm &
      # tail -1 because "ps | grep db_bench" returns 2 entries and we want the second
      while :; do ps aux | grep db_bench | grep -v grep | tail -1; sleep 10; done >& $output.ps &
      # This sets a global value
      pspid=$!
    
      while :; do
        b_gb=$( ls -l $DB_DIR 2> /dev/null | grep blob | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
        s_gb=$( ls -l $DB_DIR 2> /dev/null | grep sst | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
        l_gb=$( ls -l $WAL_DIR 2> /dev/null | grep log | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
        a_gb=$( ls -l $DB_DIR 2> /dev/null | awk '{ c += 1; b += $5 } END { printf "%.1f", b / (1024*1024*1024) }' )
        ts=$( date +%H%M%S )
        echo -e "${a_gb}\t${s_gb}\t${l_gb}\t${b_gb}\t${ts}"
        sleep 10
      done >& $output.sizes &
      # This sets a global value
      szpid=$!
    }
    
    function stop_stats {
      output=$1
      kill $pspid
      kill $szpid
      killall iostat
      killall vmstat
      sleep 1
      gzip $output.io
      gzip $output.vm
    
      am=$( sort -nk 1,1 $output.sizes | tail -1 | awk '{ print $1 }' )
      sm=$( sort -nk 2,2 $output.sizes | tail -1 | awk '{ print $2 }' )
      lm=$( sort -nk 3,3 $output.sizes | tail -1 | awk '{ print $3 }' )
      bm=$( sort -nk 4,4 $output.sizes | tail -1 | awk '{ print $4 }' )
      echo -e "max sizes (GB): $am all, $sm sst, $lm log, $bm blob" >> $output.sizes
    }
    
    function units_as_gb {
      size=$1
      units=$2
    
      case $units in
        MB)
          echo "$size" | awk '{ printf "%.1f", $1 / 1024.0 }'
          ;;
        GB)
          echo "$size"
          ;;
        TB)
          echo "$size" | awk '{ printf "%.1f", $1 * 1024.0 }'
          ;;
        *)
          echo "NA"
          ;;
      esac
    }
    
    function summarize_result {
      test_out=$1
      test_name=$2
      bench_name=$3
    
      # In recent versions these can be found directly via db_bench --version, --build_info but
      # grepping from the log lets this work on older versions.
      version="$( grep "RocksDB version:" "$DB_DIR"/LOG | head -1 | awk '{ printf "%s", $5 }' )"
      git_hash="$( grep "Git sha" "$DB_DIR"/LOG | head -1 | awk '{ printf "%s", substr($5, 1, 10) }' )"
    
      # Note that this function assumes that the benchmark executes long enough so
      # that "Compaction Stats" is written to stdout at least once. If it won't
      # happen then empty output from grep when searching for "Sum" will cause
      # syntax errors.
      date=$( grep ^Date: $test_out | awk '{ print $6 "-" $3 "-" $4 "T" $5 }' )
      my_date=$( month_to_num $date )
      uptime=$( grep ^Uptime\(secs $test_out | tail -1 | awk '{ printf "%.0f", $2 }' )
      stall_pct=$( grep "^Cumulative stall" $test_out| tail -1  | awk '{  print $5 }' )
      nstall=$( grep ^Stalls\(count\):  $test_out | tail -1 | awk '{ print $2 + $6 + $10 + $14 + $18 + $20 }' )
    
      if ! grep ^"$bench_name" "$test_out" > /dev/null 2>&1 ; then
        echo -e "failed\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t$test_name\t$my_date\t$version\t$job_id\t$git_hash"
        return
      fi
    
      # Output formats
      # V1: readrandom   :      10.218 micros/op 3131616 ops/sec; 1254.3 MB/s (176144999 of 176144999 found)
      # The MB/s is missing for multireadrandom
      # V1a: multireadrandom :      10.164 micros/op 3148272 ops/sec; (177099990 of 177099990 found)
      # V1: overwrite    :       7.939 micros/op 125963 ops/sec;   50.5 MB/s
      # V2: overwrite    :       7.854 micros/op 127320 ops/sec 1800.001 seconds 229176999 operations;   51.0 MB/s
    
      format_version=$( grep ^"$bench_name" "$test_out" \
        | awk '{ if (NF >= 10 && $8 == "seconds") { print "V2" } else { print "V1" } }' )
      if [ $format_version == "V1" ]; then
        ops_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $5 }' )
        usecs_op=$( grep ^"$bench_name" "$test_out" | awk '{ printf "%.1f", $3 }' )
        if [ "$bench_name" == "multireadrandom" ]; then
          mb_sec="NA"
        else
          mb_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $7 }' )
        fi
      else
        ops_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $5 }' )
        usecs_op=$( grep ^"$bench_name" "$test_out" | awk '{ printf "%.1f", $3 }' )
        mb_sec=$( grep ^"$bench_name" "$test_out" | awk '{ print $11 }' )
      fi
    
      # For RocksDB version 4.x there are fewer fields but this still parses correctly
      # Cumulative writes: 242M writes, 242M keys, 18M commit groups, 12.9 writes per commit group, ingest: 95.96 GB, 54.69 MB/s
      cum_writes_gb_orig=$( grep "^Cumulative writes" "$test_out" | tail -1 | awk '{ for (x=1; x<=NF; x++) { if ($x == "ingest:") { printf "%.1f", $(x+1) } } }' )
      cum_writes_units=$( grep "^Cumulative writes" "$test_out" | tail -1 | awk '{ for (x=1; x<=NF; x++) { if ($x == "ingest:") { print $(x+2) } } }' | sed 's/,//g' )
      cum_writes_gb=$( units_as_gb "$cum_writes_gb_orig" "$cum_writes_units" )
    
      # Cumulative compaction: 1159.74 GB write, 661.03 MB/s write, 1108.89 GB read, 632.04 MB/s read, 6284.3 seconds
      cmb_ps=$( grep "^Cumulative compaction" "$test_out" | tail -1 | awk '{ printf "%.1f", $6 }' )
      sum_wgb_orig=$( grep "^Cumulative compaction" "$test_out" | tail -1 | awk '{ printf "%.1f", $3 }' )
      sum_wgb_units=$( grep "^Cumulative compaction" "$test_out" | tail -1 | awk '{ print $4 }' )
      sum_wgb=$( units_as_gb "$sum_wgb_orig" "$sum_wgb_units" )
    
      # Flush(GB): cumulative 97.193, interval 1.247
      flush_wgb=$( grep "^Flush(GB)" "$test_out" | tail -1 | awk '{ print $3 }' | tr ',' ' ' | awk '{ print $1 }' )
    
      if [[ "$sum_wgb" == "NA" || \
            "$cum_writes_gb" == "NA" || \
            "$cum_writes_gb_orig" == "0.0" || \
            -z "$cum_writes_gb_orig" || \
            -z "$flush_wgb" ]]; then
        wamp="NA"
      else
        wamp=$( echo "( $sum_wgb + $flush_wgb ) / $cum_writes_gb" | bc -l | awk '{ printf "%.1f", $1 }' )
      fi
    
      c_wsecs=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $15 }' )
      c_csecs=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $16 }' )
    
      lsm_size=$( grep "^ Sum" "$test_out" | tail -1 | awk '{ printf "%.0f%s", $3, $4 }' )
      blob_size=$( grep "^Blob file count:" "$test_out" | tail -1 | awk '{ printf "%.0f%s", $7, $8 }' )
      # Remove the trailing comma from blob_size: 3.0GB, -> 3.0GB
      blob_size="${blob_size/,/}"
    
      b_rgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $21 }' )
      b_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.0f", $22 }' )
    
      p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' )
      p99=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $7 }' )
      p999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $9 }' )
      p9999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $11 }' )
      pmax=$( grep "^Min: " $test_out | grep Median: | grep Max: | awk '{ printf "%.0f", $6 }' )
    
      # Use the last line because there might be extra lines when the db_bench process exits with an error
      time_out="$test_out".time
      u_cpu=$( tail -1 "$time_out" | awk '{ printf "%.1f", $2 / 1000.0 }' )
      s_cpu=$( tail -1 "$time_out" | awk '{ printf "%.1f", $3 / 1000.0  }' )
    
      rss="NA"
      if [ -f $test_out.stats.ps ]; then
        rss=$( awk '{ printf "%.1f\n", $6 / (1024 * 1024) }' "$test_out".stats.ps | sort -n | tail -1 )
      fi
    
      # if the report TSV (Tab-Separated Values) file does not yet exist, create it and write the header row to it
      if [ ! -f "$report" ]; then
        echo -e "# ops_sec - operations per second" >> "$report"
        echo -e "# mb_sec - ops_sec * size-of-operation-in-MB" >> "$report"
        echo -e "# lsm_sz - size of LSM tree" >> "$report"
        echo -e "# blob_sz - size of BlobDB logs" >> "$report"
        echo -e "# c_wgb - GB written by compaction" >> "$report"
        echo -e "# w_amp - Write-amplification as (bytes written by compaction / bytes written by memtable flush)" >> "$report"
        echo -e "# c_mbps - Average write rate for compaction" >> "$report"
        echo -e "# c_wsecs - Wall clock seconds doing compaction" >> "$report"
        echo -e "# c_csecs - CPU seconds doing compaction" >> "$report"
        echo -e "# b_rgb - Blob compaction read GB" >> "$report"
        echo -e "# b_wgb - Blob compaction write GB" >> "$report"
        echo -e "# usec_op - Microseconds per operation" >> "$report"
        echo -e "# p50, p99, p99.9, p99.99 - 50th, 99th, 99.9th, 99.99th percentile response time in usecs" >> "$report"
        echo -e "# pmax - max response time in usecs" >> "$report"
        echo -e "# uptime - RocksDB uptime in seconds" >> "$report"
        echo -e "# stall% - Percentage of time writes are stalled" >> "$report"
        echo -e "# Nstall - Number of stalls" >> "$report"
        echo -e "# u_cpu - #seconds/1000 of user CPU" >> "$report"
        echo -e "# s_cpu - #seconds/1000 of system CPU" >> "$report"
        echo -e "# rss - max RSS in GB for db_bench process" >> "$report"
        echo -e "# test - Name of test" >> "$report"
        echo -e "# date - Date/time of test" >> "$report"
        echo -e "# version - RocksDB version" >> "$report"
        echo -e "# job_id - User-provided job ID" >> "$report"
        echo -e "# githash - git hash at which db_bench was compiled" >> "$report"
        echo -e $tsv_header >> "$report"
      fi
    
      echo -e "$ops_sec\t$mb_sec\t$lsm_size\t$blob_size\t$sum_wgb\t$wamp\t$cmb_ps\t$c_wsecs\t$c_csecs\t$b_rgb\t$b_wgb\t$usecs_op\t$p50\t$p99\t$p999\t$p9999\t$pmax\t$uptime\t$stall_pct\t$nstall\t$u_cpu\t$s_cpu\t$rss\t$test_name\t$my_date\t$version\t$job_id\t$git_hash" \
        >> "$report"
    }
    
    function run_bulkload {
      # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the
      # client can discover where to restart a load after a crash. I think this is a good way to load.
      echo "Bulk loading $num_keys random keys"
      log_file_name=$output_dir/benchmark_bulkload_fillrandom.log
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=fillrandom,stats \
           --use_existing_db=0 \
           --disable_auto_compactions=1 \
           --sync=0 \
           $params_bulkload \
           --threads=1 \
           --memtablerep=vector \
           --allow_concurrent_memtable_write=false \
           --disable_wal=1 \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      eval $cmd
      summarize_result $log_file_name bulkload fillrandom
    
      echo "Compacting..."
      log_file_name=$output_dir/benchmark_bulkload_compact.log
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=compact,stats \
           --use_existing_db=1 \
           --disable_auto_compactions=1 \
           --sync=0 \
           $params_w \
           --threads=1 \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      eval $cmd
    }
    
    #
    # Parameter description:
    #
    # $1 - 1 if I/O statistics should be collected.
    # $2 - compaction type to use (level=0, universal=1).
    # $3 - number of subcompactions.
    # $4 - number of maximum background compactions.
    #
    function run_manual_compaction_worker {
      # This runs with a vector memtable and the WAL disabled to load faster.
      # It is still crash safe and the client can discover where to restart a
      # load after a crash. I think this is a good way to load.
      echo "Bulk loading $num_keys random keys for manual compaction."
    
      log_file_name=$output_dir/benchmark_man_compact_fillrandom_$3.log
    
      if [ "$2" == "1" ]; then
        extra_params=$params_univ_compact
      else
        extra_params=$params_level_compact
      fi
    
      # Make sure that fillrandom uses the same compaction options as compact.
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=fillrandom,stats \
           --use_existing_db=0 \
           --disable_auto_compactions=0 \
           --sync=0 \
           $extra_params \
           --threads=$num_threads \
           --compaction_measure_io_stats=$1 \
           --compaction_style=$2 \
           --subcompactions=$3 \
           --memtablerep=vector \
           --allow_concurrent_memtable_write=false \
           --disable_wal=1 \
           --max_background_compactions=$4 \
           --seed=$( date +%s ) \
           2>&1 | tee -a $log_file_name"
    
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      eval $cmd
    
      summarize_result $log_file_name man_compact_fillrandom_$3 fillrandom
    
      echo "Compacting with $3 subcompactions specified ..."
    
      log_file_name=$output_dir/benchmark_man_compact_$3.log
    
      # This is the part we're really interested in. Given that compact benchmark
      # doesn't output regular statistics then we'll just use the time command to
      # measure how long this step takes.
      cmd="{ \
           time ./db_bench --benchmarks=compact,stats \
           --use_existing_db=1 \
           --disable_auto_compactions=0 \
           --sync=0 \
           $extra_params \
           --threads=$num_threads \
           --compaction_measure_io_stats=$1 \
           --compaction_style=$2 \
           --subcompactions=$3 \
           --max_background_compactions=$4 \
           ;} 2>&1 | tee -a $log_file_name"
    
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      eval $cmd
    
      # Can't use summarize_result here. One way to analyze the results is to run
      # "grep real" on the resulting log files.
    }
    
    function run_univ_compaction {
      # Always ask for I/O statistics to be measured.
      io_stats=1
    
      # Values: kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1.
      compaction_style=1
    
      # Define a set of benchmarks.
      subcompactions=(1 2 4 8 16)
      max_background_compactions=(16 16 8 4 2)
    
      i=0
      total=${#subcompactions[@]}
    
      # Execute a set of benchmarks to cover variety of scenarios.
      while [ "$i" -lt "$total" ]
      do
        run_manual_compaction_worker $io_stats $compaction_style ${subcompactions[$i]} \
          ${max_background_compactions[$i]}
        ((i++))
      done
    }
    
    function run_fillseq {
      # This runs with a vector memtable. WAL can be either disabled or enabled
      # depending on the input parameter (1 for disabled, 0 for enabled). The main
      # benefit behind disabling WAL is to make loading faster. It is still crash
      # safe and the client can discover where to restart a load after a crash. I
      # think this is a good way to load.
    
      # Make sure that we'll have unique names for all the files so that data won't
      # be overwritten.
      if [ $1 == 1 ]; then
        log_file_name="${output_dir}/benchmark_fillseq.wal_disabled.v${value_size}.log"
        test_name=fillseq.wal_disabled.v${value_size}
      else
        log_file_name="${output_dir}/benchmark_fillseq.wal_enabled.v${value_size}.log"
        test_name=fillseq.wal_enabled.v${value_size}
      fi
    
      # For Leveled compaction hardwire this to 0 so that data that is trivial-moved
      # to larger levels (3, 4, etc) will be compressed.
      if [ $compaction_style == "leveled" ]; then
        comp_arg="--min_level_to_compress=0"
      elif [ $compaction_style == "universal" ]; then
        if [ ! -z $UNIVERSAL_ALLOW_TRIVIAL_MOVE ]; then
          # See GetCompressionFlush where compression_size_percent < 1 means use the default
          # compression which is needed because trivial moves are enabled
          comp_arg="--universal_compression_size_percent=-1"
        else
          # See GetCompressionFlush where compression_size_percent > 0 means no compression.
          # Don't set anything here because compression_size_percent is set in univ_const_params
          comp_arg=""
        fi
      else
        # compaction_style == "blob"
        comp_arg="--min_level_to_compress=0"
      fi
    
      echo "Loading $num_keys keys sequentially"
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=fillseq,stats \
           $params_fillseq \
           $comp_arg \
           --use_existing_db=0 \
           --sync=0 \
           --threads=1 \
           --memtablerep=vector \
           --allow_concurrent_memtable_write=false \
           --disable_wal=$1 \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
    
      # The constant "fillseq" which we pass to db_bench is the benchmark name.
      summarize_result $log_file_name $test_name fillseq
    }
    
    function run_lsm {
      # This flushes the memtable and L0 to get the LSM tree into a deterministic
      # state for read-only tests that will follow.
      echo "Flush memtable, wait, compact L0, wait"
      job=$1
    
      if [ $job = flush_mt_l0 ]; then
        benchmarks=levelstats,flush,waitforcompaction,compact0,waitforcompaction,memstats,levelstats
      elif [ $job = waitforcompaction ]; then
        benchmarks=levelstats,waitforcompaction,memstats,levelstats
      else
        echo Job unknown: $job
        exit $EXIT_NOT_COMPACTION_TEST
      fi
    
      log_file_name=$output_dir/benchmark_${job}.log
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=$benchmarks,stats \
           --use_existing_db=1 \
           --sync=0 \
           $params_w \
           --threads=1 \
           --seed=$( date +%s ) \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      # waitforcompaction can hang with universal (compaction_style=1)
      # see bug https://github.com/facebook/rocksdb/issues/9275
      eval $cmd
      stop_stats $log_file_name.stats
      # Don't summarize, the log doesn't have the output needed for it
    }
    
    function run_change {
      output_name=$1
      grep_name=$2
      benchmarks=$3
      echo "Do $num_keys random $output_name"
      log_file_name="$output_dir/benchmark_${output_name}.t${num_threads}.s${syncval}.log"
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=$benchmarks,stats \
           --use_existing_db=1 \
           --sync=$syncval \
           $params_w \
           --threads=$num_threads \
           --merge_operator=\"put\" \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name ${output_name}.t${num_threads}.s${syncval} $grep_name
    }
    
    function run_filluniquerandom {
      echo "Loading $num_keys unique keys randomly"
      log_file_name=$output_dir/benchmark_filluniquerandom.log
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=filluniquerandom,stats \
           --use_existing_db=0 \
           --sync=0 \
           $params_w \
           --threads=1 \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name filluniquerandom filluniquerandom
    }
    
    function run_readrandom {
      echo "Reading $num_keys random keys"
      log_file_name="${output_dir}/benchmark_readrandom.t${num_threads}.log"
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=readrandom,stats \
           --use_existing_db=1 \
           $params_w \
           --threads=$num_threads \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name readrandom.t${num_threads} readrandom
    }
    
    function run_multireadrandom {
      echo "Multi-Reading $num_keys random keys"
      log_file_name="${output_dir}/benchmark_multireadrandom.t${num_threads}.log"
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=multireadrandom,stats \
           --use_existing_db=1 \
           --threads=$num_threads \
           --batch_size=10 \
           $params_w \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name multireadrandom.t${num_threads} multireadrandom
    }
    
    function run_readwhile {
      operation=$1
      echo "Reading $num_keys random keys while $operation"
      log_file_name="${output_dir}/benchmark_readwhile${operation}.t${num_threads}.log"
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench --benchmarks=readwhile${operation},stats \
           --use_existing_db=1 \
           --sync=$syncval \
           $params_w \
           --threads=$num_threads \
           --merge_operator=\"put\" \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name readwhile${operation}.t${num_threads} readwhile${operation}
    }
    
    function run_rangewhile {
      operation=$1
      full_name=$2
      reverse_arg=$3
      log_file_name="${output_dir}/benchmark_${full_name}.t${num_threads}.log"
      time_cmd=$( get_cmd $log_file_name.time )
      echo "Range scan $num_keys random keys while ${operation} for reverse_iter=${reverse_arg}"
      cmd="$time_cmd ./db_bench --benchmarks=seekrandomwhile${operation},stats \
           --use_existing_db=1 \
           --sync=$syncval \
           $params_w \
           --threads=$num_threads \
           --merge_operator=\"put\" \
           --seek_nexts=$num_nexts_per_seek \
           --reverse_iterator=$reverse_arg \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      echo $cmd | tee $log_file_name
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name ${full_name}.t${num_threads} seekrandomwhile${operation}
    }
    
    function run_range {
      full_name=$1
      reverse_arg=$2
      log_file_name="${output_dir}/benchmark_${full_name}.t${num_threads}.log"
      time_cmd=$( get_cmd $log_file_name.time )
      echo "Range scan $num_keys random keys for reverse_iter=${reverse_arg}"
      cmd="$time_cmd ./db_bench --benchmarks=seekrandom,stats \
           --use_existing_db=1 \
           $params_w \
           --threads=$num_threads \
           --seek_nexts=$num_nexts_per_seek \
           --reverse_iterator=$reverse_arg \
           --seed=$( date +%s ) \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee -a $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
      summarize_result $log_file_name ${full_name}.t${num_threads} seekrandom
    }
    
    function run_randomtransaction {
      echo "..."
      log_file_name=$output_dir/benchmark_randomtransaction.log
      time_cmd=$( get_cmd $log_file_name.time )
      cmd="$time_cmd ./db_bench $params_w --benchmarks=randomtransaction,stats \
           --num=$num_keys \
           --transaction_db \
           --threads=5 \
           --transaction_sets=5 \
           --report_file=${log_file_name}.r.csv \
           2>&1 | tee $log_file_name"
      if [[ "$job_id" != "" ]]; then
        echo "Job ID: ${job_id}" > $log_file_name
        echo $cmd | tee -a $log_file_name
      else
        echo $cmd | tee $log_file_name
      fi
      start_stats $log_file_name.stats
      eval $cmd
      stop_stats $log_file_name.stats
    }
    
    function now() {
      echo `date +"%s"`
    }
    
    
    echo "===== Benchmark ====="
    
    # Run!!!
    IFS=',' read -a jobs <<< $bench_cmd
    # shellcheck disable=SC2068
    for job in ${jobs[@]}; do
    
      if [ $job != debug ]; then
        echo "Starting $job (ID: $job_id) at `date`" | tee -a $schedule
      fi
    
      start=$(now)
      if [ $job = bulkload ]; then
        run_bulkload
      elif [ $job = flush_mt_l0 ]; then
        run_lsm flush_mt_l0
      elif [ $job = waitforcompaction ]; then
        run_lsm waitforcompaction
      elif [ $job = fillseq_disable_wal ]; then
        run_fillseq 1
      elif [ $job = fillseq_enable_wal ]; then
        run_fillseq 0
      elif [ $job = overwrite ]; then
        run_change overwrite overwrite overwrite
      elif [ $job = overwritesome ]; then
        # This uses a different name for overwrite results so it can be run twice in one benchmark run.
        run_change overwritesome overwrite overwrite
      elif [ $job = overwriteandwait ]; then
        run_change overwriteandwait overwrite overwrite,waitforcompaction
      elif [ $job = updaterandom ]; then
        run_change updaterandom updaterandom updaterandom
      elif [ $job = mergerandom ]; then
        run_change mergerandom mergerandom mergerandom
      elif [ $job = filluniquerandom ]; then
        run_filluniquerandom
      elif [ $job = readrandom ]; then
        run_readrandom
      elif [ $job = multireadrandom ]; then
        run_multireadrandom
      elif [ $job = fwdrange ]; then
        run_range $job false
      elif [ $job = revrange ]; then
        run_range $job true
      elif [ $job = readwhilewriting ]; then
        run_readwhile writing
      elif [ $job = readwhilemerging ]; then
        run_readwhile merging
      elif [ $job = fwdrangewhilewriting ]; then
        run_rangewhile writing $job false
      elif [ $job = revrangewhilewriting ]; then
        run_rangewhile writing $job true
      elif [ $job = fwdrangewhilemerging ]; then
        run_rangewhile merging $job false
      elif [ $job = revrangewhilemerging ]; then
        run_rangewhile merging $job true
      elif [ $job = randomtransaction ]; then
        run_randomtransaction
      elif [ $job = universal_compaction ]; then
        run_univ_compaction
      elif [ $job = debug ]; then
        num_keys=1000; # debug
        echo "Setting num_keys to $num_keys"
      else
        echo "unknown job $job"
        exit $EXIT_UNKNOWN_JOB
      fi
      end=$(now)
    
      if [ $job != debug ]; then
        echo "Completed $job (ID: $job_id) in $((end-start)) seconds" | tee -a $schedule
      fi
    
      echo -e $tsv_header
      tail -1 $report
    
    done
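
    Putting the ${VAR:-default} pattern to use: every tunable above can be overridden from the environment at invocation time. A hypothetical run might look like this (the paths and values are illustrative, not taken from the script):

    # Hypothetical invocation: override a few of the script's env-var defaults,
    # then name one of the benchmark tests listed in display_usage.
    DB_DIR=/data/rocksdb \
    WAL_DIR=/data/rocksdb_wal \
    OUTPUT_DIR=/tmp/bench_out \
    NUM_THREADS=32 \
    NUM_KEYS=100000000 \
    DURATION=1800 \
    ./benchmark.sh readwhilewriting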
    
    
  • Original article: https://blog.csdn.net/qq_39451578/article/details/133075297