run-tests.yaml

name: Tests

on:
  push:
    branches: [ main ]
  pull_request:

jobs:
  run-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - { model: 'bigscience/bloom-560m', python-version: '3.8' }
          - { model: 'bigscience/bloom-560m', python-version: '3.9' }
          - { model: 'bigscience/bloom-560m', python-version: '3.10' }
          - { model: 'bigscience/bloom-560m', python-version: '3.11' }
          - { model: 'Maykeye/TinyLLama-v0', python-version: '3.8' }
          - { model: 'Maykeye/TinyLLama-v0', python-version: '3.11' }
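          # ^-- bloom-560m is tested on every supported Python version; TinyLLama-v0
          #     only on the oldest and newest, presumably to keep the matrix small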
      fail-fast: false
    timeout-minutes: 15
    steps:
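      # Model weights can exhaust the runner's RAM (lack of RAM is a common CI issue,
      # see the free-RAM watcher in [Step 1] below), so extra swap is added up front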
      - name: Increase swap space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 10
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: Cache dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
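          # ^-- bump "Key-v1" to invalidate the pip cache manually; hashing
          #     setup.cfg refreshes it whenever the dependency list changes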
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[dev]
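          # ^-- installs petals itself plus its dev extras (declared in setup.cfg)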
      - name: Test
        run: |
          export MODEL_NAME="${{ matrix.model }}"
          export REF_NAME="${{ matrix.model }}"
          export ADAPTER_NAME="${{ matrix.model == 'bigscience/bloom-560m' && 'artek0chumak/bloom-560m-safe-peft' || '' }}"
          export TENSOR_PARALLEL_ARGS="${{ matrix.model == 'bigscience/bloom-560m' && '--tensor_parallel_devices cpu cpu' || '' }}"
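          # ^-- GitHub's "cond && a || b" expressions act as a ternary here: the adapter
          #     and tensor-parallel flags are set only for the bloom-560m runs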

          # [Step 1] Watch free RAM (lack of RAM is a common issue in CI)
          bash -c 'while true; do free -h && sleep 30s; done' &
          RAM_WATCH_PID=$!
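          # ^-- background loop prints memory usage every 30 s so OOM kills are
          #     visible in the log; the PID is saved for cleanup in [Step 5]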

          # [Step 2] Set up a tiny test swarm (see https://github.com/bigscience-workshop/petals/wiki/Launch-your-own-swarm)
          python -m petals.cli.run_dht \
            --identity_path tests/bootstrap.id --host_maddrs /ip4/127.0.0.1/tcp/31337 &> bootstrap.log &
          BOOTSTRAP_PID=$!
          export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
          # ^-- multiaddr in INITIAL_PEERS is determined by --identity_path and --host_maddrs
          sleep 5  # wait for DHT init

          python -m petals.cli.run_server $MODEL_NAME --adapters $ADAPTER_NAME --torch_dtype float32 --num_blocks 5 \
            --mean_balance_check_period 10 \
            --initial_peers $INITIAL_PEERS --throughput 1 &> server1.log &
          SERVER1_PID=$!
          # ^-- rebalancing test: this server chooses blocks 0:5, then sees a gap in the swarm and moves there
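          #     (--mean_balance_check_period 10 makes it re-check block balance often,
          #     presumably every 10 s, so the move fits into the 30 s warm-up below)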
          sleep 10  # wait for the 1st server to choose blocks

          python -m petals.cli.run_server $MODEL_NAME --adapters $ADAPTER_NAME --torch_dtype float32 --block_indices 0:5 \
            --identity_path tests/server2.id \
            --initial_peers $INITIAL_PEERS --throughput 1 &> server2.log &
          SERVER2_PID=$!
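          # ^-- pins blocks 0:5, duplicating the 1st server's initial choice so that it rebalances away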

          python -m petals.cli.run_server $MODEL_NAME --adapters $ADAPTER_NAME --torch_dtype float32 --num_blocks 14 \
            --attn_cache_tokens 2048 --max_chunk_size_bytes 1024 \
            --initial_peers $INITIAL_PEERS --throughput auto &> server3.log &
          SERVER3_PID=$!
          # ^-- chunking test
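          #     (the tiny --max_chunk_size_bytes forces computations to be split into many chunks)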

          python -m petals.cli.run_server $MODEL_NAME $TENSOR_PARALLEL_ARGS --torch_dtype float32 --block_indices 0:2 \
            --initial_peers $INITIAL_PEERS --throughput auto &> server4.log &
          SERVER4_PID=$!
          # ^-- tensor parallelism test (not compatible with adapters yet)
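          #     (--tensor_parallel_devices cpu cpu presumably splits each block between two CPU "devices";
          #     note that --adapters is omitted for this server for the same reason)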

          sleep 5  # wait for the log files to appear
          tail -n 100 -f bootstrap.log server*.log &
          LOGGER_PID=$!

          sleep 30  # wait for servers to eval throughput, download layers, and rebalance
          kill -0 $BOOTSTRAP_PID $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID  # ensure all peers survived init
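          # ^-- kill -0 sends no signal; it only checks that the PIDs still exist and
          #     fails the step if any peer has died (GitHub's default bash runs with -e)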

          # [Step 3] Run PyTest
          pytest tests --durations=0 --durations-min=1.0 -v
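          # ^-- --durations=0 with --durations-min=1.0 reports the runtime of every test taking at least 1 s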

          # [Step 4] Check if benchmarks work (their results here are meaningless since it's a tiny swarm of CPU servers)
          python benchmarks/benchmark_inference.py --model $MODEL_NAME --initial_peers $INITIAL_PEERS --torch_dtype float32 \
            --seq_len 3
          python benchmarks/benchmark_forward.py --model $MODEL_NAME --initial_peers $INITIAL_PEERS --torch_dtype float32 \
            --seq_len 3 --batch_size 3 --n_steps 1
          python benchmarks/benchmark_training.py --model $MODEL_NAME --initial_peers $INITIAL_PEERS --torch_dtype float32 \
            --seq_len 3 --batch_size 3 --pre_seq_len 1 --n_steps 1 --task cls
          python benchmarks/benchmark_training.py --model $MODEL_NAME --initial_peers $INITIAL_PEERS --torch_dtype float32 \
            --seq_len 3 --batch_size 3 --pre_seq_len 1 --n_steps 1 --task causal_lm

          # [Step 5] Clean up
          kill -0 $BOOTSTRAP_PID $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID  # ensure all peers survived tests
          kill -s SIGINT $BOOTSTRAP_PID $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $LOGGER_PID $RAM_WATCH_PID
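          # ^-- SIGINT (rather than SIGKILL) presumably lets the servers shut down and leave the swarm cleanly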
          echo "Done!"