# run-tests.yaml — GitHub Actions CI workflow
  1. name: Tests
  2. on:
  3. push:
  4. branches: [ main ]
  5. pull_request:
  6. jobs:
  7. convert-model:
  8. runs-on: ubuntu-latest
  9. env:
  10. BLOOM_TESTING_WRITE_TOKEN: ${{ secrets.BLOOM_TESTING_WRITE_TOKEN }}
  11. timeout-minutes: 15
  12. steps:
  13. - name: Checkout
  14. uses: actions/checkout@v3
  15. - name: Check if the model is cached
  16. id: cache-model
  17. uses: actions/cache@v3
  18. with:
  19. path: ~/.dummy
  20. key: model-v1-${{ hashFiles('setup.cfg', 'src/petals/cli/convert_model.py') }}
  21. - name: Set up Python
  22. if: steps.cache-model.outputs.cache-hit != 'true'
  23. uses: actions/setup-python@v2
  24. with:
  25. python-version: 3.9
  26. - name: Cache dependencies
  27. if: steps.cache-model.outputs.cache-hit != 'true'
  28. uses: actions/cache@v3
  29. with:
  30. path: ~/.cache/pip
  31. key: Key-v1-3.9-${{ hashFiles('setup.cfg') }}
  32. - name: Install dependencies
  33. if: steps.cache-model.outputs.cache-hit != 'true'
  34. run: |
  35. python -m pip install --upgrade pip
  36. pip install .
  37. - name: Delete any test models older than 1 week
  38. if: steps.cache-model.outputs.cache-hit != 'true'
  39. run: |
  40. python tests/scripts/remove_old_models.py --author bloom-testing --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
  41. - name: Delete previous version of this model, if exists
  42. if: steps.cache-model.outputs.cache-hit != 'true'
  43. run: |
  44. export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
  45. python -c "from huggingface_hub import delete_repo; delete_repo(token='$BLOOM_TESTING_WRITE_TOKEN', \
  46. repo_id='bloom-testing/test-bloomd-560m-$HF_TAG')" || true
  47. - name: Convert model and push to hub
  48. if: steps.cache-model.outputs.cache-hit != 'true'
  49. run: |
  50. export HF_TAG=${{ hashFiles('setup.cfg', 'src/petals/cli/convert_model.py') }}
  51. python -m petals.cli.convert_model --model bigscience/bloom-560m --output_path ./converted_model \
  52. --output_repo bloom-testing/test-bloomd-560m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN \
  53. --resize_token_embeddings 50000
  54. run-tests:
  55. runs-on: ubuntu-latest
  56. needs: convert-model
  57. strategy:
  58. matrix:
  59. python-version: [ 3.7, 3.8, 3.9 ]
  60. fail-fast: false
  61. timeout-minutes: 15
  62. steps:
  63. - name: Checkout
  64. uses: actions/checkout@v3
  65. - name: Set up Python
  66. uses: actions/setup-python@v2
  67. with:
  68. python-version: ${{ matrix.python-version }}
  69. - name: Cache dependencies
  70. uses: actions/cache@v3
  71. with:
  72. path: ~/.cache/pip
  73. key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
  74. - name: Install dependencies
  75. run: |
  76. python -m pip install --upgrade pip
  77. pip install .[dev]
  78. - name: Test
  79. run: |
  80. export HF_TAG=${{ hashFiles('setup.cfg', 'src/petals/cli/convert_model.py') }}
  81. export MODEL_NAME=bloom-testing/test-bloomd-560m-$HF_TAG
  82. export REF_NAME=bigscience/bloom-560m
  83. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:12 \
  84. --new_swarm --identity tests/test.id --host_maddrs /ip4/127.0.0.1/tcp/31337 --throughput 1 \
  85. --torch_dtype float32 --compression NONE --attn_cache_size 0.2GiB &> server1.log &
  86. SERVER1_PID=$!
  87. sleep 5 # wait for the first server to initialize DHT
  88. export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
  89. # ^-- server 1 multiaddr is determined by --identity and --host_maddrs
  90. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 12:22 \
  91. --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server2.log &
  92. SERVER2_PID=$!
  93. sleep 10 # wait for initial servers to declare blocks, then let server decide which blocks to serve
  94. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:5 \
  95. --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server3.log &
  96. SERVER3_PID=$!
  97. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 4:14 \
  98. --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server4.log &
  99. SERVER4_PID=$!
  100. python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --num_blocks 3 \
  101. --initial_peers $INITIAL_PEERS --throughput 1 --tensor_parallel_devices cpu cpu --torch_dtype float32 &> server5.log &
  102. SERVER5_PID=$!
  103. tail -n 100 -f server*.log &
  104. LOGGER_PID=$!
  105. sleep 30 # wait for servers to download layers
  106. kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID # ensure all servers survived init
  107. pytest tests --durations=0 --durations-min=1.0 -v
  108. kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID # ensure all servers survived tests
  109. kill -s SIGINT $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID $LOGGER_PID
  110. echo "Done!"