# run-tests.yaml — CI workflow: convert a test model, then run the test suite against it.
  1. name: Tests
  2. on:
  3. push:
  4. branches: [ main ]
  5. pull_request:
  6. jobs:
  7. convert-model:
  8. runs-on: ubuntu-latest
  9. env:
  10. BLOOM_TESTING_WRITE_TOKEN: ${{ secrets.BLOOM_TESTING_WRITE_TOKEN }}
  11. timeout-minutes: 15
  12. steps:
  13. - uses: actions/checkout@v2
  14. - name: Set up Python
  15. uses: actions/setup-python@v2
  16. with:
  17. python-version: 3.9
  18. - name: Cache dependencies
  19. uses: actions/cache@v2
  20. with:
  21. path: ~/.cache/pip
  22. key: Key-v1-py3.9-${{ hashFiles('setup.cfg') }}
  23. - name: Install dependencies
  24. run: |
  25. python -m pip install --upgrade pip
  26. pip install .
  27. - name: Delete any test models older than 1 week
  28. run: |
  29. python tests/scripts/remove_old_models.py --author bloom-testing --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
  30. - name: Delete previous version of this model, if exists
  31. run: |
  32. export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
  33. python -c "from huggingface_hub import delete_repo; delete_repo(token='$BLOOM_TESTING_WRITE_TOKEN', \
  34. name='test-bloomd-560m-$HF_TAG', organization='bloom-testing')" || true
  35. - name: Convert model and push to hub
  36. run: |
  37. export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
  38. python -m petals.cli.convert_model --model bigscience/bloom-560m --output_path ./converted_model \
  39. --output_repo bloom-testing/test-bloomd-560m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN \
  40. --resize_token_embeddings 50000
  41. run-tests:
  42. runs-on: ubuntu-latest
  43. needs: convert-model
  44. strategy:
  45. matrix:
  46. python-version: [ 3.7, 3.8, 3.9 ]
  47. fail-fast: false
  48. timeout-minutes: 15
  49. steps:
  50. - uses: actions/checkout@v2
  51. - name: Set up Python
  52. uses: actions/setup-python@v2
  53. with:
  54. python-version: ${{ matrix.python-version }}
  55. - name: Cache dependencies
  56. uses: actions/cache@v2
  57. with:
  58. path: ~/.cache/pip
  59. key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
  60. - name: Install dependencies
  61. run: |
  62. python -m pip install --upgrade pip
  63. pip install .[dev]
  64. - name: Test
  65. run: |
  66. export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
  67. export MODEL_NAME=bloom-testing/test-bloomd-560m-$HF_TAG
  68. export REF_NAME=bigscience/bloom-560m
  69. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:12 \
  70. --new_swarm --identity tests/test.id --host_maddrs /ip4/127.0.0.1/tcp/31337 --throughput 1 \
  71. --torch_dtype float32 --compression NONE --attn_cache_size 0.2GiB &> server1.log &
  72. SERVER1_PID=$!
  73. sleep 5 # wait for the first server to initialize DHT
  74. export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
  75. # ^-- server 1 multiaddr is determined by --identity and --host_maddrs
  76. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 12:22 \
  77. --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server2.log &
  78. SERVER2_PID=$!
  79. sleep 10 # wait for initial servers to declare blocks, then let server decide which blocks to serve
  80. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:6 \
  81. --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server3.log &
  82. SERVER3_PID=$!
  83. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 4:16 \
  84. --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server4.log &
  85. SERVER4_PID=$!
  86. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --num_blocks 3 \
  87. --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server5.log &
  88. SERVER5_PID=$!
  89. tail -n 100 -f server*.log &
  90. LOGGER_PID=$!
  91. sleep 30 # wait for servers to download layers
  92. kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID # ensure all servers survived init
  93. PYTHONPATH=. pytest tests --durations=0 --durations-min=1.0 -v
  94. kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID # ensure all servers survived tests
  95. kill -s SIGINT $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID $LOGGER_PID
  96. echo "Done!"