# NOTE(review): this file arrived with all newlines/indentation stripped; the
# block structure below is reconstructed from key order and cw2 (ClusterWork 2)
# config conventions — verify nesting against the consuming runner.
---
# Shared defaults applied to every experiment document in this file.
name: DEFAULT
project: Spikey

# Slurm submission settings (cw2-style).
slurm:
  name: 'Spikey_{config[name]}'
  partitions:
    - single
  standard_output: ./reports/slurm/out_%A_%a.log
  standard_error: ./reports/slurm/err_%A_%a.log
  num_parallel_jobs: 50
  cpus_per_task: 8
  memory_per_cpu: 4000
  time_limit: 1440  # in minutes
  ntasks: 1
  venv: '.venv/bin/activate'
  # Shell lines run before the job starts ({tmp} expanded by the scheduler).
  sh_lines:
    - 'mkdir -p {tmp}/wandb'
    - 'mkdir -p {tmp}/local_pycache'
    - 'export PYTHONPYCACHEPREFIX={tmp}/local_pycache'

runner: spikey

scheduler:
  reps_per_version: 1
  agents_per_job: 1
  reps_per_agent: 1

# Weights & Biases logging ({...} placeholders filled in by the runner).
wandb:
  project: '{config[project]}'
  group: '{config[name]}'
  job_type: '{delta_desc}'
  name: '{job_id}_{task_id}:{run_id}:{rand}={config[name]}_{delta_desc}'
  #tags:
  #  - '{config[env][name]}'
  #  - '{config[algo][name]}'
  sync_tensorboard: false
  monitor_gym: false
  save_code: false

---
# Experiment "Test" — inherits the DEFAULT document above via `import: $`.
name: Test
import: $

latent_projector:
  type: rnn  # Options: 'fc', 'rnn'
  input_size: 19531  # =1s Input size for the Latent Projector (length of snippets).
  latent_size: 8  # Size of the latent representation before message passing.
  #layer_shapes: [256, 32]  # List of layer sizes for the latent projector (if type is 'fc').
  #activations: ['ReLU', 'ReLU']  # Activation functions for the latent projector layers (if type is 'fc').
  rnn_hidden_size: 12  # Hidden size for the RNN projector (if type is 'rnn').
  rnn_num_layers: 1  # Number of layers for the RNN projector (if type is 'rnn').

middle_out:
  output_size: 8  # Size of the latent representation after message passing.
  num_peers: 3  # Number of most correlated peers to consider.

predictor:
  layer_shapes: [8, 4]  # List of layer sizes for the predictor.
  activations: ['ReLU', 'None']  # Activation functions for the predictor layers.

training:
  epochs: 128  # Number of training epochs.
  batch_size: 64  # Batch size for training.
  num_batches: 16  # Batches per epoch
  learning_rate: 0.001  # Learning rate for the optimizer.
  eval_freq: -1  # 8 # Frequency of evaluation during training (in epochs).
  save_path: models  # Directory to save the best model and encoder.
  # NOTE(review): placed under `training` because it precedes the `evaluation`
  # key in the flattened source — confirm against the consumer.
  num_points: 1000  # Number of data points to visualize

evaluation:
  full_compression: false  # Perform full compression during evaluation

bitstream_encoding:
  type: identity  # Options: 'arithmetic', 'identity', 'bzip2'

data:
  url: https://content.neuralink.com/compression-challenge/data.zip  # URL to download the dataset.
  directory: data  # Directory to extract and store the dataset.
  split_ratio: 0.8  # Ratio to split the data into train and test sets.
  cut_length: null  # Optional length to cut sequences to.

profiler:
  enable: false