#!/bin/bash
# SLURM batch script: install the DPPO package into a fresh Python 3.10
# virtual environment on a GPU dev node.
#
# Usage: sbatch this file from the repository root (the directory that
# contains setup.py / pyproject.toml); it installs into ./.venv there.
# NOTE(review): SLURM writes the --output/--error files before the script
# runs, so the logs/ directory must already exist at submit time.
#SBATCH --job-name=dppo_install
#SBATCH --account=hk-project-p0022232
#SBATCH --partition=dev_accelerated
#SBATCH --gres=gpu:1
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=4
#SBATCH --time=00:30:00
#SBATCH --mem=16G
#SBATCH --output=logs/dppo_install_%j.out
#SBATCH --error=logs/dppo_install_%j.err

# Fail fast: abort on any command failure, unset variable, or pipeline error,
# so "Installation completed!" is only printed when every step succeeded.
set -euo pipefail

# Load CUDA module (required for PyTorch)
module load devel/cuda/12.4

# Print job info for the log file
echo "Starting DPPO installation..."
echo "Job ID: $SLURM_JOB_ID"
echo "Node: $SLURM_NODELIST"
# CUDA_VISIBLE_DEVICES may be unset at this point; default keeps set -u happy.
echo "GPU: ${CUDA_VISIBLE_DEVICES:-unset}"

# Navigate to the directory the job was submitted from (quoted: path may
# contain spaces); abort with a message if it is gone.
cd "$SLURM_SUBMIT_DIR" || { echo "cannot cd to $SLURM_SUBMIT_DIR" >&2; exit 1; }

# Create and activate virtual environment with Python 3.10
python3.10 -m venv .venv
# shellcheck disable=SC1091 — .venv is created just above
source .venv/bin/activate

# Upgrade pip
pip install --upgrade pip

# Install base package (editable)
pip install -e .

# Install gym dependencies (optional - comment out if not needed).
# Quoted so the extras spec is never glob-expanded by the shell.
pip install -e ".[gym]"

echo "Installation completed!"
echo "Python version: $(python --version)"
echo "Pip version: $(pip --version)"
echo "Installed packages:"
pip list