diff --git a/.github/workflows/call_gpu_tests.yml b/.github/workflows/call_gpu_tests.yml index 525708218..a692fb5f4 100644 --- a/.github/workflows/call_gpu_tests.yml +++ b/.github/workflows/call_gpu_tests.yml @@ -32,52 +32,37 @@ on: type: string jobs: - basic-tests-on-gpu: + gpu_tests: runs-on: ${{ inputs.os }} steps: - - name: checkout repo ${{ github.event_name == 'workflow_dispatch' && inputs.commit_id || github.sha }} + - name: Checkout repo at ${{ github.event_name == 'workflow_dispatch' && inputs.commit_id || github.sha }} uses: actions/checkout@v4 with: ref: ${{ github.event_name == 'workflow_dispatch' && inputs.commit_id || github.sha }} + - name: Set up Python ${{ inputs.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} - name: Install Rust shell: bash run: | curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.75.0 echo "$HOME/.cargo/bin" >> $GITHUB_PATH - - name: Set up Python ${{ inputs.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ inputs.python-version }} - - name: Show GPUs + - name: Install NVIDIA SDK run: | nvidia-smi - - name: Update Ubuntu - run: | - sudo apt-get update - sudo apt-get -y upgrade - - name: Ensure NVIDIA SDK available - run: | - sudo apt-get -y install cuda-toolkit-12.6 + sudo apt-get --yes update + sudo apt-get --yes install cuda-toolkit-12.6 echo "/usr/local/cuda-12.6/bin" >> $GITHUB_PATH - - name: Install dependencies - shell: bash - run: | - python -m pip install --upgrade pip - pip install pytest - pip install -e .[schemas,test,bench] - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - name: GPU pip installs + - name: Install guidance in ${{ inputs.os }} run: | - pip install sentencepiece - pip install accelerate - echo "==============================" - pip uninstall -y transformers - pip uninstall -y llama-cpp-python - CMAKE_ARGS="-DGGML_CUDA=on" pip install -e .[llamacpp,transformers] + pip install --upgrade pip + pip install sentencepiece accelerate + CMAKE_ARGS="-DGGML_CUDA=on" pip install -e .[llamacpp,transformers,test] - name: Check GPU available run: | python -c "import torch; assert torch.cuda.is_available()" - - name: Run tests (except server) for ${{ inputs.model }} + - name: gpu_tests for ${{ inputs.model }} run: | pytest -vv --cov=guidance --cov-report=xml --cov-report=term-missing \ --selected_model ${{ inputs.model }} \