model:
  d_model: 32
  d_state: 2
  d_conv: 4
  expand: 2
  n_layers: 1
  dropout: 0.5

data:
  batch_size: 32
  max_nodes: 10000
  test_split: 0.2

training:
  learning_rate: 0.01
  weight_decay: 0.0005
  epochs: 200
  patience: 20
  warmup_epochs: 10
  min_lr: 1.0e-6  # decimal point kept so YAML 1.1 loaders (e.g. PyYAML) parse this as a float

ordering:
  strategy: "bfs"  # bfs, spectral, degree, community
  preserve_locality: true

evaluation:
  metrics: ["accuracy", "f1_macro", "f1_micro"]
  save_best: true
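
# ---------------------------------------------------------------------------
# Minimal loading sketch, kept as a comment so this file remains valid YAML.
# PyYAML and the file name "config.yaml" are assumptions for illustration,
# not requirements of this config.
#
#     import yaml
#
#     with open("config.yaml") as f:
#         cfg = yaml.safe_load(f)
#
#     # Each top-level section loads as a plain dict:
#     d_model  = cfg["model"]["d_model"]            # 32
#     lr       = cfg["training"]["learning_rate"]   # 0.01
#     strategy = cfg["ordering"]["strategy"]        # "bfs"
# ---------------------------------------------------------------------------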