Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
106 commits
Select commit Hold shift + click to select a range
99386bf
begin implementation of BaseDataIOIterator
Drew-Wagner Jun 25, 2024
04b1524
refactor common logic to baseDataIOIterator
Drew-Wagner Jun 25, 2024
c62cfd0
update authors
Drew-Wagner Jun 25, 2024
f454dfa
fix typing error
Drew-Wagner Jun 25, 2024
39948b2
bug fixes and returning channel positions
Drew-Wagner Jun 27, 2024
aadeb29
return adjacency matrix
Drew-Wagner Jun 27, 2024
541edf1
EEGNet with spatial focus module
Drew-Wagner Jun 29, 2024
25bcb1b
bug fix
Drew-Wagner Jun 29, 2024
700a1b2
bug fix
Drew-Wagner Jun 29, 2024
0c6242f
explicitely define model
Drew-Wagner Jun 29, 2024
99647de
update softmax dim
Drew-Wagner Jun 29, 2024
0c7e0d1
update sum dim
Drew-Wagner Jun 29, 2024
6519732
fix num flat features
Drew-Wagner Jul 3, 2024
5cb5840
don't apply spatial focus during num flat features (because the shape…
Drew-Wagner Jul 3, 2024
c56b6fd
update dataio iterators
Drew-Wagner Jul 3, 2024
24eae4a
add torch_geometric to requirements
Drew-Wagner Jul 3, 2024
596d7c6
default positions to support torchsummary
Drew-Wagner Jul 3, 2024
873527e
move positions to device
Drew-Wagner Jul 3, 2024
1baf576
convert positions to float
Drew-Wagner Jul 3, 2024
f59e21f
update hparams for spatial focus
Drew-Wagner Jul 3, 2024
f40a6ea
init train_slurm script
Drew-Wagner Jul 3, 2024
7efd77d
venv no download
Drew-Wagner Jul 3, 2024
d110e2b
chmod
Drew-Wagner Jul 3, 2024
eec4fe7
typo
Drew-Wagner Jul 3, 2024
ff0f0cc
remove mne constraint
Drew-Wagner Jul 3, 2024
26e1de1
install h5py manually
Drew-Wagner Jul 3, 2024
ed1ca3a
don't install from extra_requiremnts
Drew-Wagner Jul 3, 2024
1888bfa
update script
Drew-Wagner Jul 3, 2024
b44f721
fix h5py requirement
Drew-Wagner Jul 3, 2024
a3b348f
move h5py higher
Drew-Wagner Jul 3, 2024
5155895
update moabb version
Drew-Wagner Jul 3, 2024
05ebec7
update mne version
Drew-Wagner Jul 3, 2024
3948676
memory profiler req
Drew-Wagner Jul 3, 2024
a31dc4e
update mne bids version
Drew-Wagner Jul 3, 2024
1aecf5f
update pyriemann version
Drew-Wagner Jul 3, 2024
cb0368f
update scripts
Drew-Wagner Jul 3, 2024
5537299
fix cd
Drew-Wagner Jul 3, 2024
d7ba569
normalize positions
Drew-Wagner Jul 3, 2024
c45e1d2
break up scripts
Drew-Wagner Jul 3, 2024
7eeaf27
add interpretter commend
Drew-Wagner Jul 5, 2024
973000b
activate env
Drew-Wagner Jul 5, 2024
9489c13
module load
Drew-Wagner Jul 5, 2024
0a8a5c3
make executable
Drew-Wagner Jul 5, 2024
430fb4c
update env names
Drew-Wagner Jul 5, 2024
265f997
write with einsum instead
Drew-Wagner Jul 8, 2024
274e5af
remove knn
Drew-Wagner Jul 9, 2024
5c70369
use native pytorch cosine similarity
Drew-Wagner Jul 9, 2024
d784c09
add temperature softmax
Drew-Wagner Jul 10, 2024
431b82f
spatial focus experiments
Drew-Wagner Jul 18, 2024
805bcfb
fixed spatial focus (can now train to near full accuracy)
Drew-Wagner Jul 18, 2024
daa1ed2
adjust priors
Drew-Wagner Jul 23, 2024
067dcec
fix leave-one-subject-out
Drew-Wagner Jul 23, 2024
96c01b4
refactor to expose encoder
Drew-Wagner Jul 23, 2024
4bab70f
rename -> SpatialEEGNet
Drew-Wagner Jul 30, 2024
af31aac
remove position noise
Drew-Wagner Aug 5, 2024
5283b69
fix lazy conversion to graph
Drew-Wagner Aug 5, 2024
3aa906f
torch_geometric implementations of data iterators
Drew-Wagner Aug 6, 2024
97b08a3
handle data_folder inputs
Drew-Wagner Aug 6, 2024
48c01aa
BREAKING: update train py to be compatible with new graphs
Drew-Wagner Aug 6, 2024
93b3f95
Implement position noise + node dropout
Drew-Wagner Aug 6, 2024
bc6c12b
fix file suffix
Drew-Wagner Aug 6, 2024
341bb30
update spatial eegnet
Drew-Wagner Aug 6, 2024
80aedcd
tmp change idx
Drew-Wagner Aug 6, 2024
4eb2c85
don't update C
Drew-Wagner Aug 6, 2024
96d8f92
add C ref
Drew-Wagner Aug 6, 2024
9465f03
fix dist
Drew-Wagner Aug 6, 2024
a324108
fix dist
Drew-Wagner Aug 6, 2024
cb7bb79
fix cls name
Drew-Wagner Aug 6, 2024
556277f
fix node_wise
Drew-Wagner Aug 6, 2024
d313d43
fix node wise
Drew-Wagner Aug 6, 2024
6cb5939
dataset -> datasets
Drew-Wagner Aug 6, 2024
6c6397c
target sessions / subjects plural
Drew-Wagner Aug 6, 2024
f85d49b
sample_rate -> resample
Drew-Wagner Aug 6, 2024
106f57a
events_to_load -> events
Drew-Wagner Aug 6, 2024
3a6bffb
fix class weights
Drew-Wagner Aug 6, 2024
560ad9d
typo
Drew-Wagner Aug 6, 2024
acb4e9a
some fixes
Drew-Wagner Aug 6, 2024
648b468
no summary
Drew-Wagner Aug 6, 2024
cb72ff2
lengths
Drew-Wagner Aug 6, 2024
cd9da2f
disable graph augment for testing
Drew-Wagner Aug 6, 2024
4cef9c8
disable augment for testing
Drew-Wagner Aug 6, 2024
9753be7
fix min max
Drew-Wagner Aug 6, 2024
5406ac7
typo
Drew-Wagner Aug 6, 2024
2bd46a5
apply instead of new
Drew-Wagner Aug 6, 2024
b494382
no support for discrete dist
Drew-Wagner Aug 6, 2024
50c6808
fix event id start at 0
Drew-Wagner Aug 6, 2024
31285a4
fix target issue
Drew-Wagner Aug 6, 2024
5b12fc7
update hparams
Drew-Wagner Aug 6, 2024
b52f550
fix data augmentation
Drew-Wagner Aug 7, 2024
1e2be5b
index subject + formatting fixes
Drew-Wagner Aug 7, 2024
c88151d
map to int
Drew-Wagner Aug 9, 2024
45463cd
fix session issue
Drew-Wagner Aug 9, 2024
357e26f
remove assert which is sometimes incorrectly triggered
Drew-Wagner Aug 17, 2024
b4029f8
hparams for cross subject
Drew-Wagner Aug 19, 2024
f74b851
hparams 004
Drew-Wagner Aug 19, 2024
125e1ad
fix augmentation
Drew-Wagner Aug 22, 2024
0d2001b
configure caching
Drew-Wagner Aug 25, 2024
10363f4
fix git mistake
Drew-Wagner Aug 26, 2024
994dc5d
nb to visual
Drew-Wagner Sep 1, 2024
c138df4
added within-subject yaml configs for 4 datasets
engmubarak48 Sep 3, 2024
9bf0946
updated number of epochs for orion search to 500-1000
engmubarak48 Sep 3, 2024
8e84c6f
corected number epochs in the orion flag
engmubarak48 Sep 4, 2024
4f0a6da
fix bug in the config and comment confusion matrix
engmubarak48 Sep 4, 2024
6abe1fd
hparams 009
Drew-Wagner Oct 15, 2024
a3bd28a
hparams 2015001
Oct 15, 2024
505d21b
hparams 2014004
Drew-Wagner Oct 15, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -158,4 +158,8 @@ dmypy.json
**/log/

# Mac OS
.DS_Store
.DS_Store

results
eeg_data
trial_*.conf
3 changes: 2 additions & 1 deletion benchmarks/MOABB/extra-requirements.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
mne==1.6.1
mne
moabb
orion
orion[profet]
scikit-learn
torch_geometric
torchinfo
187 changes: 187 additions & 0 deletions benchmarks/MOABB/hparams/MotorImagery/BNCI2014001/SpatialEEGNet.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
seed: 1234
__set_torchseed: !apply:torch.manual_seed [!ref <seed>]

# DIRECTORIES
data_folder:
!PLACEHOLDER #'/path/to/dataset'. The dataset will be automatically downloaded in this folder


cached_data_folder: !PLACEHOLDER #'path/to/pickled/dataset'


output_folder: !PLACEHOLDER
#'path/to/results'

# DATASET HPARS
# Defining the MOABB dataset.


datasets:
- !new:moabb.datasets.BNCI2014001
save_prepared_dataset: True # set to True if you want to save the prepared dataset as a pkl file to load and use afterwards
data_iterator_name: !PLACEHOLDER
target_subject_idx: !PLACEHOLDER
target_session_idx: !PLACEHOLDER
events_to_load: null # all events will be loaded
original_sample_rate: 250 # Original sampling rate provided by dataset authors
sample_rate: 125 # Target sampling rate (Hz)
# band-pass filtering cut-off frequencies
fmin: 0.13 # orion_step1: --fmin~"uniform(0.1, 5, precision=2)"
fmax: 46.0 # @orion_step1: --fmax~"uniform(20.0, 50.0, precision=3)"
n_classes: 4
# tmin, tmax respect to stimulus onset that define the interval attribute of the dataset class
# trial begins (0 s), cue (2 s, 1.25 s long); each trial is 6 s long
# dataset interval starts from 2
# -->tmin tmax are referred to this start value (e.g., tmin=0.5 corresponds to 2.5 s)
tmin: 0.
tmax: 4.0 # @orion_step1: --tmax~"uniform(1.0, 4.0, precision=2)"
n_steps_channel_selection: null
T: !apply:math.ceil
- !ref <sample_rate> * (<tmax> - <tmin>)
C: 22
# We here specify how to perform test:
# - If test_with: 'last' we perform test with the latest model.
# - if test_with: 'best', we perform test with the best model (according to the metric specified in test_key)
# The variable avg_models can be used to average the parameters of the last (or best) N saved models before testing.
# This can have a regularization effect. If avg_models: 1, the last (or best) model is used directly.
test_with: "last" # 'last' or 'best'
test_key: "acc" # Possible opts: "loss", "f1", "auc", "acc"

# METRICS
f1: !name:sklearn.metrics.f1_score
average: "macro"
acc: !name:sklearn.metrics.balanced_accuracy_score
cm: !name:sklearn.metrics.confusion_matrix
metrics:
f1: !ref <f1>
acc: !ref <acc>
cm: !ref <cm>
# TRAINING HPARS
n_train_examples: 100 # it will be replaced in the train script
# checkpoints to average
avg_models: 10 # @orion_step1: --avg_models~"uniform(1, 15,discrete=True)"
number_of_epochs: 862 # @orion_step1: --number_of_epochs~"uniform(500, 1000, discrete=True)"
lr: 0.0001 # orion_step1: --lr~"choices([0.01, 0.005, 0.001, 0.0005, 0.0001])"
# Learning rate scheduling (cyclic learning rate is used here)
max_lr: !ref <lr> # Upper bound of the cycle (max value of the lr)
base_lr: 0.00000001 # Lower bound in the cycle (min value of the lr)
step_size_multiplier: 5 #from 2 to 8
step_size: !apply:round
- !ref <step_size_multiplier> * <n_train_examples> / <batch_size>
lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler
base_lr: !ref <base_lr>
max_lr: !ref <max_lr>
step_size: !ref <step_size>
label_smoothing: 0.0
loss: !name:speechbrain.nnet.losses.nll_loss
label_smoothing: !ref <label_smoothing>
optimizer: !name:torch.optim.Adam
lr: !ref <lr>
epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter # epoch counter
limit: !ref <number_of_epochs>
batch_size_exponent: 4 # @orion_step1: --batch_size_exponent~"uniform(4, 6,discrete=True)"
batch_size: !ref 2 ** <batch_size_exponent>
valid_ratio: 0.2

# DATA AUGMENTATION
# cutcat (disabled when min_num_segments=max_num_segments=1)
max_num_segments: 3 # orion_step2: --max_num_segments~"uniform(2, 6, discrete=True)"
cutcat: !new:speechbrain.augment.time_domain.CutCat
min_num_segments: 2
max_num_segments: !ref <max_num_segments>
# random amplitude gain between 0.5-1.5 uV (disabled when amp_delta=0.)
amp_delta: 0.01742 # orion_step2: --amp_delta~"uniform(0.0, 0.5)"
rand_amp: !new:speechbrain.augment.time_domain.RandAmp
amp_low: !ref 1 - <amp_delta>
amp_high: !ref 1 + <amp_delta>
# random shifts between -300 ms to 300 ms (disabled when shift_delta=0.)
shift_delta_: 1 # orion_step2: --shift_delta_~"uniform(0, 25, discrete=True)"
shift_delta: !ref 1e-2 * <shift_delta_> # 0.250 # 0.-0.25 with steps of 0.01
min_shift: !apply:math.floor
- !ref 0 - <sample_rate> * <shift_delta>
max_shift: !apply:math.floor
- !ref 0 + <sample_rate> * <shift_delta>
time_shift: !new:speechbrain.augment.freq_domain.RandomShift
min_shift: !ref <min_shift>
max_shift: !ref <max_shift>
dim: 1
# injection of gaussian white noise
snr_white_low: 15.0 # orion_step2: --snr_white_low~"uniform(0.0, 15, precision=2)"
snr_white_delta: 19.1 # orion_step2: --snr_white_delta~"uniform(5.0, 20.0, precision=3)"
snr_white_high: !ref <snr_white_low> + <snr_white_delta>
add_noise_white: !new:speechbrain.augment.time_domain.AddNoise
snr_low: !ref <snr_white_low>
snr_high: !ref <snr_white_high>
position_noise_sigma: 0.01 # @orion_step1: --position_noise_sigma~"uniform(0.0, 0.1, precision=4)"

repeat_augment: 1 # @orion_step1: --repeat_augment 0
graph_augment: !new:torch.nn.Sequential
- !new:utils.graph_iterators.PositionNoise
sigma: !ref <position_noise_sigma>
- !new:utils.graph_iterators.NodeDrop
choose_k: 17
augment: !new:speechbrain.augment.augmenter.Augmenter
parallel_augment: True
concat_original: True
parallel_augment_fixed_bs: True
repeat_augment: !ref <repeat_augment>
shuffle_augmentations: True
min_augmentations: 4
max_augmentations: 4
augmentations:
[!ref <cutcat>, !ref <rand_amp>, !ref <time_shift>, !ref <add_noise_white>]

# DATA NORMALIZATION
dims_to_normalize: 1 # 1 (time) or 2 (EEG channels)
normalize: !name:speechbrain.processing.signal_processing.mean_std_norm
dims: !ref <dims_to_normalize>
pos_normalize: True

# MODEL
projection_dim: 22 # @orion_step1: --projection_dim~"uniform(4, 44, discrete=True)"
spatial_focus_tau: 0.3333 # @orion_step1: --spatial_focus_tau~"uniform(0.01, 1.0, precision=4)"

input_shape: [null, !ref <T>, !ref <projection_dim>, null]
cnn_temporal_kernels: 61 # @orion_step1: --cnn_temporal_kernels~"uniform(32, 72,discrete=True)"
cnn_temporal_kernelsize: 51 # @orion_step1: --cnn_temporal_kernelsize~"uniform(24, 62,discrete=True)"
# depth multiplier for the spatial depthwise conv. layer
cnn_spatial_depth_multiplier: 4 # @orion_step1: --cnn_spatial_depth_multiplier~"uniform(1, 4,discrete=True)"
cnn_spatial_max_norm: 1. # kernel max-norm constraint of the spatial depthwise conv. layer
cnn_spatial_pool: 4
cnn_septemporal_depth_multiplier: 1 # depth multiplier for the separable temporal conv. layer
cnn_septemporal_point_kernels_ratio_: 7 # @orion_step1: --cnn_septemporal_point_kernels_ratio_~"uniform(0, 8, discrete=True)"
cnn_septemporal_point_kernels_ratio: !ref <cnn_septemporal_point_kernels_ratio_> / 4
## number of temporal filters in the separable temporal conv. layer
cnn_septemporal_point_kernels_: !ref <cnn_temporal_kernels> * <cnn_spatial_depth_multiplier> * <cnn_septemporal_depth_multiplier>
cnn_septemporal_point_kernels: !apply:math.ceil
- !ref <cnn_septemporal_point_kernels_ratio> * <cnn_septemporal_point_kernels_> + 1
cnn_septemporal_kernelsize_: 15 # @orion_step1: --cnn_septemporal_kernelsize_~"uniform(3, 24,discrete=True)"
max_cnn_spatial_pool: 4
cnn_septemporal_kernelsize: !apply:round
- !ref <cnn_septemporal_kernelsize_> * <max_cnn_spatial_pool> / <cnn_spatial_pool>
cnn_septemporal_pool: 7 # @orion_step1: --cnn_septemporal_pool~"uniform(1, 8,discrete=True)"
cnn_pool_type: "avg"
dense_max_norm: 0.25 # kernel max-norm constraint of the dense layer
dropout: 0.008464 # @orion_step1: --dropout~"uniform(0.0, 0.5)"
activation_type: "elu"

model: !new:models.SpatialEEGNet.SpatialEEGNet
input_shape: !ref <input_shape>
cnn_temporal_kernels: !ref <cnn_temporal_kernels>
cnn_temporal_kernelsize: [!ref <cnn_temporal_kernelsize>, 1]
cnn_spatial_depth_multiplier: !ref <cnn_spatial_depth_multiplier>
cnn_spatial_max_norm: !ref <cnn_spatial_max_norm>
cnn_spatial_pool: [!ref <cnn_spatial_pool>, 1]
cnn_septemporal_depth_multiplier: !ref <cnn_septemporal_depth_multiplier>
cnn_septemporal_point_kernels: !ref <cnn_septemporal_point_kernels>
cnn_septemporal_kernelsize: [!ref <cnn_septemporal_kernelsize>, 1]
cnn_septemporal_pool: [!ref <cnn_septemporal_pool>, 1]
cnn_pool_type: !ref <cnn_pool_type>
activation_type: !ref <activation_type>
spatial_focus: !new:models.SpatialEEGNet.SpatialFocus
projection_dim: !ref <projection_dim>
position_dim: 3
tau: !ref <spatial_focus_tau>
dense_max_norm: !ref <dense_max_norm>
dropout: !ref <dropout>
dense_n_neurons: !ref <n_classes>
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
seed: 1234
__set_torchseed: !apply:torch.manual_seed [!ref <seed>]

# DIRECTORIES
data_folder:
!PLACEHOLDER #'/path/to/dataset'. The dataset will be automatically downloaded in this folder


cached_data_folder: !PLACEHOLDER #'path/to/pickled/dataset'


output_folder: !PLACEHOLDER
#'path/to/results'

# DATASET HPARS
# Defining the MOABB dataset.


datasets:
- !new:moabb.datasets.BNCI2014001
save_prepared_dataset: True # set to True if you want to save the prepared dataset as a pkl file to load and use afterwards
data_iterator_name: !PLACEHOLDER
target_subject_idx: !PLACEHOLDER
target_session_idx: !PLACEHOLDER
events_to_load: null # all events will be loaded
original_sample_rate: 250 # Original sampling rate provided by dataset authors
sample_rate: 125 # Target sampling rate (Hz)
# band-pass filtering cut-off frequencies
fmin: 0.13 # orion_step1: --fmin~"uniform(0.1, 5, precision=2)"
fmax: 46.0 # @orion_step1: --fmax~"uniform(20.0, 50.0, precision=3)"
n_classes: 4
# tmin, tmax respect to stimulus onset that define the interval attribute of the dataset class
# trial begins (0 s), cue (2 s, 1.25 s long); each trial is 6 s long
# dataset interval starts from 2
# -->tmin tmax are referred to this start value (e.g., tmin=0.5 corresponds to 2.5 s)
tmin: 0.
tmax: 4.0 # @orion_step1: --tmax~"uniform(1.0, 4.0, precision=2)"
n_steps_channel_selection: null
T: !apply:math.ceil
- !ref <sample_rate> * (<tmax> - <tmin>)
C: 22
# We here specify how to perform test:
# - If test_with: 'last' we perform test with the latest model.
# - if test_with: 'best', we perform test with the best model (according to the metric specified in test_key)
# The variable avg_models can be used to average the parameters of the last (or best) N saved models before testing.
# This can have a regularization effect. If avg_models: 1, the last (or best) model is used directly.
test_with: "last" # 'last' or 'best'
test_key: "acc" # Possible opts: "loss", "f1", "auc", "acc"

# METRICS
f1: !name:sklearn.metrics.f1_score
average: "macro"
acc: !name:sklearn.metrics.balanced_accuracy_score
cm: !name:sklearn.metrics.confusion_matrix
metrics:
f1: !ref <f1>
acc: !ref <acc>
cm: !ref <cm>
# TRAINING HPARS
n_train_examples: 100 # it will be replaced in the train script
# checkpoints to average
avg_models: 10 # @orion_step1: --avg_models~"uniform(1, 15,discrete=True)"
number_of_epochs: 100 # orion_step1: --number_of_epochs~"uniform(500, 1000, discrete=True)"
lr: 0.0001 # orion_step1: --lr~"choices([0.01, 0.005, 0.001, 0.0005, 0.0001])"
# Learning rate scheduling (cyclic learning rate is used here)
max_lr: !ref <lr> # Upper bound of the cycle (max value of the lr)
base_lr: 0.00000001 # Lower bound in the cycle (min value of the lr)
step_size_multiplier: 5 #from 2 to 8
step_size: !apply:round
- !ref <step_size_multiplier> * <n_train_examples> / <batch_size>
lr_annealing: !new:speechbrain.nnet.schedulers.CyclicLRScheduler
base_lr: !ref <base_lr>
max_lr: !ref <max_lr>
step_size: !ref <step_size>
label_smoothing: 0.0
loss: !name:speechbrain.nnet.losses.nll_loss
label_smoothing: !ref <label_smoothing>
optimizer: !name:torch.optim.Adam
lr: !ref <lr>
epoch_counter: !new:speechbrain.utils.epoch_loop.EpochCounter # epoch counter
limit: !ref <number_of_epochs>
batch_size_exponent: 4 # @orion_step1: --batch_size_exponent~"uniform(4, 6,discrete=True)"
batch_size: !ref 2 ** <batch_size_exponent>
valid_ratio: 0.2

# DATA AUGMENTATION
# cutcat (disabled when min_num_segments=max_num_segments=1)
max_num_segments: 3 # @orion_step2: --max_num_segments~"uniform(2, 6, discrete=True)"
cutcat: !new:speechbrain.augment.time_domain.CutCat
min_num_segments: 2
max_num_segments: !ref <max_num_segments>
# random amplitude gain between 0.5-1.5 uV (disabled when amp_delta=0.)
amp_delta: 0.01742 # @orion_step2: --amp_delta~"uniform(0.0, 0.5)"
rand_amp: !new:speechbrain.augment.time_domain.RandAmp
amp_low: !ref 1 - <amp_delta>
amp_high: !ref 1 + <amp_delta>
# random shifts between -300 ms to 300 ms (disabled when shift_delta=0.)
shift_delta_: 1 # @orion_step2: --shift_delta_~"uniform(0, 25, discrete=True)"
shift_delta: !ref 1e-2 * <shift_delta_> # 0.250 # 0.-0.25 with steps of 0.01
min_shift: !apply:math.floor
- !ref 0 - <sample_rate> * <shift_delta>
max_shift: !apply:math.floor
- !ref 0 + <sample_rate> * <shift_delta>
time_shift: !new:speechbrain.augment.freq_domain.RandomShift
min_shift: !ref <min_shift>
max_shift: !ref <max_shift>
dim: 1
# injection of gaussian white noise
snr_white_low: 15.0 # @orion_step2: --snr_white_low~"uniform(0.0, 15, precision=2)"
snr_white_delta: 19.1 # @orion_step2: --snr_white_delta~"uniform(5.0, 20.0, precision=3)"
snr_white_high: !ref <snr_white_low> + <snr_white_delta>
add_noise_white: !new:speechbrain.augment.time_domain.AddNoise
snr_low: !ref <snr_white_low>
snr_high: !ref <snr_white_high>
position_noise_sigma: 0.01 # @orion_step1: --position_noise_sigma~"uniform(0.0, 0.1, precision=4)"

repeat_augment: 1 # @orion_step1: --repeat_augment 0
graph_augment: !new:torch.nn.Sequential
- !new:utils.graph_iterators.PositionNoise
sigma: !ref <position_noise_sigma>
# - !new:utils.graph_iterators.NodeDrop
# choose_k: 17
augment: !new:speechbrain.augment.augmenter.Augmenter
parallel_augment: True
concat_original: True
parallel_augment_fixed_bs: True
repeat_augment: !ref <repeat_augment>
shuffle_augmentations: True
min_augmentations: 4
max_augmentations: 4
augmentations:
[!ref <cutcat>, !ref <rand_amp>, !ref <time_shift>, !ref <add_noise_white>]

# DATA NORMALIZATION
dims_to_normalize: 1 # 1 (time) or 2 (EEG channels)
normalize: !name:speechbrain.processing.signal_processing.mean_std_norm
dims: !ref <dims_to_normalize>
pos_normalize: True

# MODEL
projection_dim: 22 # @orion_step1: --projection_dim~"uniform(4, 44, discrete=True)"
spatial_focus_tau: 0.3333 # @orion_step1: --spatial_focus_tau~"uniform(0.001, 1.0, precision=4)"

input_shape: [null, !ref <T>, !ref <projection_dim>, null]
cnn_temporal_kernels: 61 # @orion_step1: --cnn_temporal_kernels~"uniform(32, 72,discrete=True)"
cnn_temporal_kernelsize: 51 # @orion_step1: --cnn_temporal_kernelsize~"uniform(24, 62,discrete=True)"
# depth multiplier for the spatial depthwise conv. layer
cnn_spatial_depth_multiplier: 4 # @orion_step1: --cnn_spatial_depth_multiplier~"uniform(1, 4,discrete=True)"
cnn_spatial_max_norm: 1. # kernel max-norm constraint of the spatial depthwise conv. layer
cnn_spatial_pool: 4
cnn_septemporal_depth_multiplier: 1 # depth multiplier for the separable temporal conv. layer
cnn_septemporal_point_kernels_ratio_: 7 # @orion_step1: --cnn_septemporal_point_kernels_ratio_~"uniform(0, 8, discrete=True)"
cnn_septemporal_point_kernels_ratio: !ref <cnn_septemporal_point_kernels_ratio_> / 4
## number of temporal filters in the separable temporal conv. layer
cnn_septemporal_point_kernels_: !ref <cnn_temporal_kernels> * <cnn_spatial_depth_multiplier> * <cnn_septemporal_depth_multiplier>
cnn_septemporal_point_kernels: !apply:math.ceil
- !ref <cnn_septemporal_point_kernels_ratio> * <cnn_septemporal_point_kernels_> + 1
cnn_septemporal_kernelsize_: 15 # @orion_step1: --cnn_septemporal_kernelsize_~"uniform(3, 24,discrete=True)"
max_cnn_spatial_pool: 4
cnn_septemporal_kernelsize: !apply:round
- !ref <cnn_septemporal_kernelsize_> * <max_cnn_spatial_pool> / <cnn_spatial_pool>
cnn_septemporal_pool: 7 # @orion_step1: --cnn_septemporal_pool~"uniform(1, 8,discrete=True)"
cnn_pool_type: "avg"
dense_max_norm: 0.25 # kernel max-norm constraint of the dense layer
dropout: 0.008464 # @orion_step1: --dropout~"uniform(0.0, 0.5)"
activation_type: "elu"

model: !new:models.SpatialEEGNet.SpatialEEGNet
input_shape: !ref <input_shape>
cnn_temporal_kernels: !ref <cnn_temporal_kernels>
cnn_temporal_kernelsize: [!ref <cnn_temporal_kernelsize>, 1]
cnn_spatial_depth_multiplier: !ref <cnn_spatial_depth_multiplier>
cnn_spatial_max_norm: !ref <cnn_spatial_max_norm>
cnn_spatial_pool: [!ref <cnn_spatial_pool>, 1]
cnn_septemporal_depth_multiplier: !ref <cnn_septemporal_depth_multiplier>
cnn_septemporal_point_kernels: !ref <cnn_septemporal_point_kernels>
cnn_septemporal_kernelsize: [!ref <cnn_septemporal_kernelsize>, 1]
cnn_septemporal_pool: [!ref <cnn_septemporal_pool>, 1]
cnn_pool_type: !ref <cnn_pool_type>
activation_type: !ref <activation_type>
spatial_focus: !new:models.SpatialEEGNet.SpatialFocus
projection_dim: !ref <projection_dim>
position_dim: 3
tau: !ref <spatial_focus_tau>
dense_max_norm: !ref <dense_max_norm>
dropout: !ref <dropout>
dense_n_neurons: !ref <n_classes>
Loading