浏览代码

Fix scripts and notebooks

Aleksandr Borzunov 2 年之前
父节点
当前提交
67f96d49cf

+ 4 - 5
.github/workflows/run-tests.yaml

@@ -21,11 +21,11 @@ jobs:
         uses: actions/cache@v2
         with:
           path: ~/.cache/pip
-          key: Key-v1-py3.9-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
+          key: Key-v1-py3.9-${{ hashFiles('setup.cfg') }}
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
+          pip install .
       - name: Delete any test models older than 1 week
         run: |
           python tests/scripts/remove_old_models.py --author bloom-testing --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
@@ -59,12 +59,11 @@ jobs:
         uses: actions/cache@v2
         with:
           path: ~/.cache/pip
-          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
+          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install -r requirements.txt
-          pip install -r requirements-dev.txt
+          pip install .[dev]
       - name: Test
         run: |
           export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")

+ 2 - 2
README.md

@@ -85,10 +85,10 @@ This is important because it's technically possible for peers serving model laye
 
 ## Installation
 
-Here's how to install the dependencies with conda:
+Here's how to install Petals with conda:
 ```
 conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
-pip install -r requirements.txt
+pip install git+https://github.com/bigscience-workshop/petals
 ```
 
 This script uses Anaconda to install cuda-enabled PyTorch.

+ 2 - 2
cli/deploy_server.sh

@@ -32,7 +32,7 @@ while getopts ":m:i:d:p:b:a:t:" option; do
             ;;
         b)  BLOCK_IDS=${OPTARG}
             ;;
-        a)  HOST_MADDR=${OPTARG} # TODO: allow several maddrs 
+        a)  HOST_MADDR=${OPTARG} # TODO: allow several maddrs
             ;;
         t)  RUN_LOCAL_TESTS=true
             ;;
@@ -67,7 +67,7 @@ else
 
     conda install -y -c conda-forge cudatoolkit-dev==11.3.1 cudatoolkit==11.3.1 cudnn==8.2.1.32
     pip install -i https://pypi.org/simple torch==1.12.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html
-    pip install -i https://pypi.org/simple -r requirements.txt
+    pip install -i https://pypi.org/simple .
     pip install -i https://test.pypi.org/simple/ bitsandbytes-cuda113
 fi
 

+ 5 - 5
cli/run_local_servers.sh

@@ -40,7 +40,7 @@ else
 
     conda install -y -c conda-forge cudatoolkit-dev==11.3.1 cudatoolkit==11.3.1 cudnn==8.2.1.32
     pip install -i https://pypi.org/simple torch==1.12.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html
-    pip install -i https://pypi.org/simple -r requirements.txt
+    pip install -i https://pypi.org/simple .
     pip install -i https://test.pypi.org/simple/ bitsandbytes-cuda113
 fi
 
@@ -59,7 +59,7 @@ echo "Initial peer: ${INITIAL_PEER}"
 # Initialize the config file #
 ##############################
 
-typeset -A cfg 
+typeset -A cfg
 cfg=( # set default values in config array
     [device]="cpu"
     [block_ids]="1:2"
@@ -72,7 +72,7 @@ cfg=( # set default values in config array
 ###############
 
 for SERVER_ID in $(seq 0 $(( $NUM_SERVERS - 1 )) )
-do  
+do
     ###############
     # Read config #
     ###############
@@ -85,14 +85,14 @@ do
             cfg[$varname]=$(echo "$line" | cut -d '=' -f 2-)
         fi
     done < ${CONFIG_PATH}/server_${SERVER_ID}.cfg
-    
+
     echo "=== Server #${SERVER_ID} ==="
     echo "Server ID: ${cfg[id_path]}"
     echo "Device: ${cfg[device]}"
     echo "Bloom block ids: ${cfg[block_ids]}"
     echo "Host maddr: ${cfg[maddr]}"
     echo ""
-    
+
     ##############
     # Run server #
     ##############

+ 6 - 6
cli/run_remote_servers.sh

@@ -45,7 +45,7 @@ else
 
     conda install -y -c conda-forge cudatoolkit-dev==11.3.1 cudatoolkit==11.3.1 cudnn==8.2.1.32
     pip install -i https://pypi.org/simple torch==1.12.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html
-    pip install -i https://pypi.org/simple -r requirements.txt
+    pip install -i https://pypi.org/simple .
 fi
 
 
@@ -65,7 +65,7 @@ echo "Initial peer: ${INITIAL_PEER}"
 # Initialize the config file #
 ##############################
 
-typeset -A cfg 
+typeset -A cfg
 cfg=( # set default values in config array
     [name]=""
     [device]="cpu"
@@ -79,7 +79,7 @@ cfg=( # set default values in config array
 ###############
 
 for SERVER_ID in $(seq 0 $(( $NUM_SERVERS - 1 )) )
-do  
+do
     ###############
     # Read config #
     ###############
@@ -92,7 +92,7 @@ do
             cfg[$varname]=$(echo "$line" | cut -d '=' -f 2-)
         fi
     done < ${CONFIG_PATH}/server_${SERVER_ID}.cfg
-    
+
     SERVER_NAME="${USERNAME}@${cfg[name]}"
     echo "=== Server #${SERVER_ID} ==="
     echo "Server name ${SERVER_NAME}"
@@ -101,10 +101,10 @@ do
     echo "Bloom block ids: ${cfg[block_ids]}"
     echo "Host maddr: ${cfg[maddr]}"
     echo "================="
-    
+
     ##############
     # Run server #
     ##############
-     
+
     ssh -i ${SSH_KEY_PATH} ${SERVER_NAME} "tmux new-session -d -s 'Server_${SERVER_ID}' 'cd bloom-demo && bash cli/deploy_server.sh -i ${INITIAL_PEER} -d ${cfg[device]} -p ${cfg[id_path]} -b ${cfg[block_ids]} -a ${cfg[maddr]}'"
 done

+ 7 - 14
examples/prompt-tuning-personachat.ipynb

@@ -36,22 +36,15 @@
     "import subprocess\n",
     "import sys\n",
     "\n",
+    "!pip install git+https://github.com/bigscience-workshop/petals\n",
+    "!pip install datasets wandb\n",
     "\n",
     "IN_COLAB = 'google.colab' in sys.modules\n",
-    "\n",
-    "if IN_COLAB:\n",
-    "    subprocess.run(\"git clone https://github.com/bigscience-workshop/petals\", shell=True)\n",
-    "    subprocess.run(\"pip install -r petals/requirements.txt\", shell=True)\n",
-    "    subprocess.run(\"pip install datasets wandb\", shell=True)\n",
-    "\n",
+    "if IN_COLAB:  # Remove CUDA binaries on CPU-only colabs to not confuse bitsandbytes\n",
     "    try:\n",
     "        subprocess.check_output([\"nvidia-smi\", \"-L\"])\n",
     "    except subprocess.CalledProcessError as e:\n",
-    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)\n",
-    "\n",
-    "    sys.path.insert(0, './petals/')\n",
-    "else:\n",
-    "    sys.path.insert(0, \"..\")"
+    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)"
    ]
   },
   {
@@ -314,7 +307,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.8.0 ('petals')",
+   "display_name": "Python 3.6.9 64-bit",
    "language": "python",
    "name": "python3"
   },
@@ -328,11 +321,11 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.0"
+   "version": "3.6.9"
   },
   "vscode": {
    "interpreter": {
-    "hash": "a303c9f329a09f921588ea6ef03898c90b4a8e255a47e0bd6e36f6331488f609"
+    "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
    }
   }
  },

+ 5 - 12
examples/prompt-tuning-sst2.ipynb

@@ -36,22 +36,15 @@
     "import subprocess\n",
     "import sys\n",
     "\n",
+    "!pip install git+https://github.com/bigscience-workshop/petals\n",
+    "!pip install datasets wandb\n",
     "\n",
     "IN_COLAB = 'google.colab' in sys.modules\n",
-    "\n",
-    "if IN_COLAB:\n",
-    "    subprocess.run(\"git clone https://github.com/bigscience-workshop/petals\", shell=True)\n",
-    "    subprocess.run(\"pip install -r petals/requirements.txt\", shell=True)\n",
-    "    subprocess.run(\"pip install datasets wandb\", shell=True)\n",
-    "\n",
+    "if IN_COLAB:  # Remove CUDA binaries on CPU-only colabs to not confuse bitsandbytes\n",
     "    try:\n",
     "        subprocess.check_output([\"nvidia-smi\", \"-L\"])\n",
     "    except subprocess.CalledProcessError as e:\n",
-    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)\n",
-    "\n",
-    "    sys.path.insert(0, './petals/')\n",
-    "else:\n",
-    "    sys.path.insert(0, \"..\")"
+    "        subprocess.run(\"rm -r /usr/local/cuda/lib64\", shell=True)"
    ]
   },
   {
@@ -313,7 +306,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.9"
+   "version": "3.6.9"
   },
   "vscode": {
    "interpreter": {

+ 0 - 6
requirements-dev.txt

@@ -1,6 +0,0 @@
-pytest==6.2.5  # see https://github.com/pytest-dev/pytest/issues/9621
-pytest-forked
-pytest-asyncio==0.16.0
-black==22.3.0
-isort==5.10.1
-psutil

+ 0 - 9
requirements.txt

@@ -1,9 +0,0 @@
-torch>=1.12
-bitsandbytes==0.34.0
-accelerate==0.10.0
-huggingface-hub==0.7.0
-transformers==4.21.3
-protobuf>=3.20.3,<4.0dev
-git+https://github.com/learning-at-home/hivemind@be88b4280cdd87432168e1da238e532f1364078b
-humanfriendly
-async-timeout>=4.0.2