
Fix readme, notebook, workflows

Aleksandr Borzunov · 2 years ago
commit e2a43476a6

+ 10 - 11
.github/workflows/run-tests.yaml

@@ -21,11 +21,11 @@ jobs:
         uses: actions/cache@v2
         with:
           path: ~/.cache/pip
-          key: Key-v1-py3.9-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
+          key: Key-v1-py3.9-${{ hashFiles('setup.cfg') }}
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
+          pip install .
       - name: Delete any test models older than 1 week
         run: |
           python tests/scripts/remove_old_models.py --author bloom-testing --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
@@ -37,7 +37,7 @@ jobs:
       - name: Convert model and push to hub
         run: |
           export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
-          python -m cli.convert_model --model bigscience/bloom-560m  --output_path ./converted_model \
+          python -m petals.cli.convert_model --model bigscience/bloom-560m  --output_path ./converted_model \
             --output_repo bloom-testing/test-bloomd-560m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN \
             --resize_token_embeddings 50000
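For context on the `--resize_token_embeddings 50000` flag: shrinking the vocabulary keeps the converted test model small. Below is a minimal illustration using the standard `transformers` API, which is presumably what the conversion script performs under the hood (an assumption; the printed shapes rely on bloom-560m's 250880-token vocabulary and 1024-dim hidden size):

```python
# Illustrative only: what resizing token embeddings does to the model.
from transformers import BloomForCausalLM

model = BloomForCausalLM.from_pretrained("bigscience/bloom-560m")
print(model.get_input_embeddings().weight.shape)  # torch.Size([250880, 1024])
model.resize_token_embeddings(50000)              # keep only the first 50k vocab rows
print(model.get_input_embeddings().weight.shape)  # torch.Size([50000, 1024])
```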
 
@@ -59,19 +59,18 @@ jobs:
         uses: actions/cache@v2
         with:
           path: ~/.cache/pip
-          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
+          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
-          pip install -r requirements-dev.txt
+          pip install .[dev]
       - name: Test
         run: |
           export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
           export MODEL_NAME=bloom-testing/test-bloomd-560m-$HF_TAG
           export REF_NAME=bigscience/bloom-560m
 
-          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:12 \
+          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:12 \
             --new_swarm --identity tests/test.id --host_maddrs /ip4/127.0.0.1/tcp/31337 --throughput 1 \
             --torch_dtype float32 --compression NONE --attn_cache_size 0.2GiB &> server1.log &
           SERVER1_PID=$!
@@ -81,21 +80,21 @@ jobs:
           export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
           # ^-- server 1 multiaddr is determined by --identity and --host_maddrs
 
-          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 12:22 \
+          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 12:22 \
             --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server2.log &
           SERVER2_PID=$!
 
          sleep 10 # wait for initial servers to declare blocks, then let the remaining servers decide which blocks to serve
 
-          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:6 \
+          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:6 \
             --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server3.log &
           SERVER3_PID=$!
 
-          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 4:16 \
+          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 4:16 \
             --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server4.log &
           SERVER4_PID=$!
 
-          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --num_blocks 3 \
+          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --num_blocks 3 \
             --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server5.log &
           SERVER5_PID=$!
 

+ 9 - 10
README.md

@@ -85,10 +85,10 @@ This is important because it's technically possible for peers serving model laye
 
 ## Installation
 
-Here's how to install the dependencies with conda:
+Here's how to install Petals with conda:
 ```
 conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
-pip install -r requirements.txt
+pip install git+https://github.com/bigscience-workshop/petals
 ```
 
 These commands use Anaconda to install CUDA-enabled PyTorch.
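After installing, a quick smoke test can confirm the package is importable (illustrative; `petals.cli.run_server` is the module invoked throughout this commit, and importing it should not start a server):

```python
# Illustrative smoke test after `pip install git+...`.
import importlib

for mod in ("petals", "petals.cli.run_server"):
    importlib.import_module(mod)
print("petals installed and importable")
```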
@@ -107,7 +107,7 @@ For a detailed instruction with larger models, see ["Launch your own swarm"](htt
 
 First, run a couple of servers, each in a separate shell. To launch your first server, run:
 ```bash
-python -m cli.run_server bloom-testing/test-bloomd-560m-main --num_blocks 8 --torch_dtype float32 \
+python -m petals.cli.run_server bloom-testing/test-bloomd-560m-main --num_blocks 8 --torch_dtype float32 \
   --host_maddrs /ip4/127.0.0.1/tcp/31337   # use port 31337, local connections only
 ```
 
@@ -124,7 +124,7 @@ Mon Day 01:23:45.678 [INFO] Running DHT node on ['/ip4/127.0.0.1/tcp/31337/p2p/A
 You can use this address (`/ip4/whatever/else`) to connect additional servers. Open another terminal and run:
 
 ```bash
-python -m cli.run_server bloom-testing/test-bloomd-560m-main --num_blocks 8 --torch_dtype float32 \
+python -m petals.cli.run_server bloom-testing/test-bloomd-560m-main --num_blocks 8 --torch_dtype float32 \
   --host_maddrs /ip4/127.0.0.1/tcp/0 \
   --initial_peers /ip4/127.0... # <-- TODO: Copy the address of another server here
 # e.g. --initial_peers /ip4/127.0.0.1/tcp/31337/p2p/QmS1GecIfYouAreReadingThisYouNeedToCopyYourServerAddressCBBq
@@ -140,11 +140,10 @@ Once you have enough servers, you can use them to train and/or run inference on the model
 ```python
 import torch
 import torch.nn.functional as F
-import transformers
-from src import DistributedBloomForCausalLM
+from petals.client import BloomTokenizerFast, DistributedBloomForCausalLM
 
 initial_peers = [TODO_put_one_or_more_server_addresses_here]  # e.g. ["/ip4/127.0.0.1/tcp/more/stuff/here"]
-tokenizer = transformers.BloomTokenizerFast.from_pretrained("bloom-testing/test-bloomd-560m-main")
+tokenizer = BloomTokenizerFast.from_pretrained("bloom-testing/test-bloomd-560m-main")
 model = DistributedBloomForCausalLM.from_pretrained(
   "bloom-testing/test-bloomd-560m-main", initial_peers=initial_peers, low_cpu_mem_usage=True, torch_dtype=torch.float32
 )  # this model has only embeddings / logits, all transformer blocks rely on remote servers
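To make the snippet's payoff concrete, here is a hedged continuation (not part of the diff) showing one forward pass through the distributed model; it reuses `torch`, `F`, `tokenizer`, and `model` from above and assumes at least one server is reachable:

```python
# Illustrative continuation: compute a next-token loss remotely.
inputs = tokenizer("A cat sat on a mat", return_tensors="pt")["input_ids"]
outputs = model(inputs)  # transformer blocks run on remote servers
loss = F.cross_entropy(
    outputs.logits[:, :-1].flatten(0, 1),  # predictions for tokens 1..n
    inputs[:, 1:].flatten(),               # shifted targets
)
print("loss:", loss.item())
```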
@@ -177,14 +176,14 @@ To run minimalistic tests, spin up some servers:
 ```bash
 export MODEL_NAME=bloom-testing/test-bloomd-560m-main
 export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
-python -m cli.run_server $MODEL_NAME --block_indices 0:12 --throughput 1 --torch_dtype float32 \
+python -m petals.cli.run_server $MODEL_NAME --block_indices 0:12 --throughput 1 --torch_dtype float32 \
   --identity tests/test.id --host_maddrs /ip4/127.0.0.1/tcp/31337  &> server1.log &
 sleep 5  # wait for the first server to initialize DHT
-python -m cli.run_server $MODEL_NAME --block_indices 12:24 --throughput 1 --torch_dtype float32 \
+python -m petals.cli.run_server $MODEL_NAME --block_indices 12:24 --throughput 1 --torch_dtype float32 \
   --initial_peers /ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g &> server2.log &
 
 tail -f server1.log server2.log  # view logs for both servers
-# after you're done, kill servers with 'pkill -f cli.run_server'
+# after you're done, kill servers with 'pkill -f petals.cli.run_server'
 ```
 
 Then launch pytest:

+ 8 - 15
examples/prompt-tuning-personachat.ipynb

@@ -36,22 +36,15 @@
     "import subprocess\n",
     "import sys\n",
     "\n",
+    "!pip install -r git+https://github.com/bigscience-workshop/petals\n",
+    "!pip install datasets wandb\n",
     "\n",
     "IN_COLAB = 'google.colab' in sys.modules\n",
-    "\n",
-    "if IN_COLAB:\n",
-    "    subprocess.run(\"git clone https://github.com/bigscience-workshop/petals\", shell=True)\n",
-    "    subprocess.run(\"pip install -r petals/requirements.txt\", shell=True)\n",
-    "    subprocess.run(\"pip install datasets wandb\", shell=True)\n",
-    "\n",
+    "if IN_COLAB:  # Remove CUDA binaries on CPU-only colabs to not confuse bitsandbytes\n",
     "    try:\n",
     "        subprocess.check_output([\"nvidia-smi\", \"-L\"])\n",
     "    except subprocess.CalledProcessError as e:\n",
-    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)\n",
-    "\n",
-    "    sys.path.insert(0, './petals/')\n",
-    "else:\n",
-    "    sys.path.insert(0, \"..\")"
+    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)"
    ]
   },
   {
@@ -74,7 +67,7 @@
     "from transformers import get_scheduler\n",
     "\n",
     "# Import a Petals model\n",
-    "from src.client.remote_model import DistributedBloomForCausalLM"
+    "from petals.client.remote_model import DistributedBloomForCausalLM"
    ]
   },
   {
@@ -314,7 +307,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.8.0 ('petals')",
+   "display_name": "Python 3.6.9 64-bit",
    "language": "python",
    "name": "python3"
   },
@@ -328,11 +321,11 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.0"
+   "version": "3.6.9"
   },
   "vscode": {
    "interpreter": {
-    "hash": "a303c9f329a09f921588ea6ef03898c90b4a8e255a47e0bd6e36f6331488f609"
+    "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
    }
   }
  },
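For readers skimming the notebook JSON: the reworked setup cell boils down to the standalone sketch below. The extra `FileNotFoundError` catch is my addition (an assumption covering machines where `nvidia-smi` is absent entirely rather than exiting nonzero):

```python
# Standalone sketch of the new Colab setup cell (illustrative).
import subprocess
import sys

IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
    # On CPU-only Colabs, hide the CUDA userspace libraries so that
    # bitsandbytes doesn't try to load them and fail.
    try:
        subprocess.check_output(["nvidia-smi", "-L"])  # lists visible GPUs
    except (subprocess.CalledProcessError, FileNotFoundError):
        subprocess.run("rm -r /usr/local/cuda/lib64", shell=True)
```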

+ 6 - 13
examples/prompt-tuning-sst2.ipynb

@@ -36,22 +36,15 @@
     "import subprocess\n",
     "import sys\n",
     "\n",
+    "!pip install -r git+https://github.com/bigscience-workshop/petals\n",
+    "!pip install datasets wandb\n",
     "\n",
     "IN_COLAB = 'google.colab' in sys.modules\n",
-    "\n",
-    "if IN_COLAB:\n",
-    "    subprocess.run(\"git clone https://github.com/bigscience-workshop/petals\", shell=True)\n",
-    "    subprocess.run(\"pip install -r petals/requirements.txt\", shell=True)\n",
-    "    subprocess.run(\"pip install datasets wandb\", shell=True)\n",
-    "\n",
+    "if IN_COLAB:  # Remove CUDA binaries on CPU-only colabs to not confuse bitsandbytes\n",
     "    try:\n",
     "        subprocess.check_output([\"nvidia-smi\", \"-L\"])\n",
     "    except subprocess.CalledProcessError as e:\n",
-    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)\n",
-    "\n",
-    "    sys.path.insert(0, './petals/')\n",
-    "else:\n",
-    "    sys.path.insert(0, \"..\")"
+    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)"
    ]
   },
   {
@@ -74,7 +67,7 @@
     "from transformers import get_scheduler\n",
     "\n",
     "# Import a Petals model\n",
-    "from src.client.remote_model import DistributedBloomForSequenceClassification"
+    "from petals.client.remote_model import DistributedBloomForSequenceClassification"
    ]
   },
   {
@@ -313,7 +306,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.9"
+   "version": "3.6.9"
   },
   "vscode": {
    "interpreter": {