revert debug changes

justheuristic 3 years ago
parent revision 84532ee5f9
2 changed files, 37 additions and 36 deletions:
  1. .github/workflows/run-tests.yaml (+32 -32)
  2. tests/test_full_model.py (+5 -4)

+32 -32
.github/workflows/run-tests.yaml

@@ -6,41 +6,41 @@ on:
   pull_request:
 
 jobs:
-#  convert-model:
-#    runs-on: ubuntu-latest
-#    env:
-#      BLOOM_TESTING_WRITE_TOKEN: ${{ secrets.BLOOM_TESTING_WRITE_TOKEN }}
-#    timeout-minutes: 15
-#    steps:
-#      - uses: actions/checkout@v2
-#      - name: Set up Python
-#        uses: actions/setup-python@v2
-#        with:
-#          python-version: 3.9
-#      - name: Cache dependencies
-#        uses: actions/cache@v2
-#        with:
-#          path: ~/.cache/pip
-#          key: Key-v1-py3.9-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
-#      - name: Install dependencies
-#        run: |
-#          python -m pip install --upgrade pip
-#          pip install -r requirements.txt
-#      - name: Delete previous model, if exists
-#        run: |
-#          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
-#          python -c "from huggingface_hub import delete_repo; delete_repo(token='$BLOOM_TESTING_WRITE_TOKEN', \
-#          name='test-bloomd-350m-$HF_TAG', organization='bloom-testing')" || true
-#      - name: Convert model and push to hub
-#        run: |
-#          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
-#          python -m cli.convert_model --model bigscience/bloom-350m  --output_path ./converted_model \
-#            --output_repo bloom-testing/test-bloomd-350m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
-#
+  convert-model:
+    runs-on: ubuntu-latest
+    env:
+      BLOOM_TESTING_WRITE_TOKEN: ${{ secrets.BLOOM_TESTING_WRITE_TOKEN }}
+    timeout-minutes: 15
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.9
+      - name: Cache dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: Key-v1-py3.9-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+      - name: Delete previous model, if exists
+        run: |
+          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
+          python -c "from huggingface_hub import delete_repo; delete_repo(token='$BLOOM_TESTING_WRITE_TOKEN', \
+          name='test-bloomd-350m-$HF_TAG', organization='bloom-testing')" || true
+      - name: Convert model and push to hub
+        run: |
+          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
+          python -m cli.convert_model --model bigscience/bloom-350m  --output_path ./converted_model \
+            --output_repo bloom-testing/test-bloomd-350m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
+
 
   run-tests:
     runs-on: ubuntu-latest
-#    needs: convert-model
+    needs: convert-model
     strategy:
       matrix:
         python-version: [ 3.7, 3.8, 3.9 ]
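
Note: the `export HF_TAG=$(python -c ...)` lines above derive a per-branch tag for the test model repo. GITHUB_HEAD_REF is only set on pull_request runs (it holds the PR's source branch), so the expression falls back to GITHUB_REF_NAME on push runs. A minimal standalone sketch of that logic, using the same environment variables as the workflow:

    import os

    # On pull_request events GITHUB_HEAD_REF holds the source branch name;
    # on push events it is unset/empty, so fall back to GITHUB_REF_NAME.
    hf_tag = os.environ.get("GITHUB_HEAD_REF") or os.environ.get("GITHUB_REF_NAME")
    print(hf_tag)  # e.g. "fix-tests"; used to name bloom-testing/test-bloomd-350m-<tag>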

+5 -4
tests/test_full_model.py

@@ -2,7 +2,6 @@ import pytest
 import torch
 import transformers
 from hivemind import get_logger, use_hivemind_log_handler
-
 from test_utils import *
 
 from src.client.remote_model import DistributedBloomForCausalLM
@@ -14,8 +13,9 @@ logger = get_logger(__file__)
 @pytest.mark.forked
 def test_full_model_exact_match(atol_forward=1e-3, atol_inference=1e-3):
     tokenizer = transformers.BloomTokenizerFast.from_pretrained(MODEL_NAME)
-    model = DistributedBloomForCausalLM.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS,
-                                                        low_cpu_mem_usage=True, torch_dtype=torch.float32)
+    model = DistributedBloomForCausalLM.from_pretrained(
+        MODEL_NAME, initial_peers=INITIAL_PEERS, low_cpu_mem_usage=True, torch_dtype=torch.float32
+    )
     assert isinstance(model, DistributedBloomForCausalLM)
     assert len(model.transformer.h) == model.config.n_layer
 
@@ -42,7 +42,8 @@ def test_full_model_exact_match(atol_forward=1e-3, atol_inference=1e-3):
 
         if REF_NAME:
             ref_model = transformers.BloomForCausalLM.from_pretrained(
-                REF_NAME, low_cpu_mem_usage=True, torch_dtype=torch.float32)
+                REF_NAME, low_cpu_mem_usage=True, torch_dtype=torch.float32
+            )
             dummy_mask = torch.ones_like(test_inputs, dtype=torch.bool)
             # note: this creates a dummy mask to make the test compatible with older transformer versions
             # prior to https://github.com/huggingface/transformers/pull/17837
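
Note: the dummy mask above is an all-ones boolean tensor, i.e. every position is treated as a real (non-padded) token. Passing it explicitly makes transformers releases from before huggingface/transformers#17837 behave like newer ones, which build such a mask automatically when attention_mask is omitted. A minimal sketch of the pattern, with hypothetical inputs since the forward call itself sits outside this hunk:

    import torch

    test_inputs = torch.tensor([[15496, 11, 995]])  # hypothetical token ids
    # all-ones mask: no position is treated as padding
    dummy_mask = torch.ones_like(test_inputs, dtype=torch.bool)
    # hypothetical call: outputs = ref_model(test_inputs, attention_mask=dummy_mask)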