diff --git a/experiments/ankle_experiments/2D_mse_tv_1176.yml b/experiments/ankle_experiments/2D_mse_tv_1176.yml
deleted file mode 100644
index 82f7199..0000000
--- a/experiments/ankle_experiments/2D_mse_tv_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176_2D' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: mse_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/ankle_experiments/2D_perceptual_tv_1176.yml b/experiments/ankle_experiments/2D_perceptual_tv_1176.yml
deleted file mode 100644
index c894993..0000000
--- a/experiments/ankle_experiments/2D_perceptual_tv_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176_2D' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/ankle_experiments/2D_ssim_1176.yml b/experiments/ankle_experiments/2D_ssim_1176.yml
deleted file mode 100644
index 8cd39d9..0000000
--- a/experiments/ankle_experiments/2D_ssim_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176_2D' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: ssim
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/ankle_experiments/3D_mse_tv_1176.yml b/experiments/ankle_experiments/3D_mse_tv_1176.yml
deleted file mode 100644
index bb82e18..0000000
--- a/experiments/ankle_experiments/3D_mse_tv_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: mse_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/ankle_experiments/3D_perceptual_tv_1176.yml b/experiments/ankle_experiments/3D_perceptual_tv_1176.yml
deleted file mode 100644
index d452288..0000000
--- a/experiments/ankle_experiments/3D_perceptual_tv_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/ankle_experiments/3D_ssim_1176.yml b/experiments/ankle_experiments/3D_ssim_1176.yml
deleted file mode 100644
index fda0fd5..0000000
--- a/experiments/ankle_experiments/3D_ssim_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: ssim
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_perceptual_tv_1176_HR_fulldata.yml b/experiments/baselines/2D_perceptual_tv_1176_HR_fulldata.yml
deleted file mode 100644
index 74ba71f..0000000
--- a/experiments/baselines/2D_perceptual_tv_1176_HR_fulldata.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176_HR_2D' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 50
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 3
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_perceptualnet_1176_HR.yml b/experiments/baselines/2D_perceptualnet_1176_HR.yml
deleted file mode 100644
index aab44fa..0000000
--- a/experiments/baselines/2D_perceptualnet_1176_HR.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176_HR' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176_HR
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: 2021_02_25_05_51_33_1_3D_perceptualnet_ds_mse_tv
-  magnification: 4
-  n_blocks: 15
-  preceptual_criterion: l1
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True-
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_perceptualnet_cm.yml b/experiments/baselines/2D_perceptualnet_cm.yml
deleted file mode 100644
index a591280..0000000
--- a/experiments/baselines/2D_perceptualnet_cm.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: True
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176_HR
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: 2021_02_25_05_51_33_1_3D_perceptualnet_ds_mse_tv
-  magnification: 4
-  n_blocks: 15
-  preceptual_criterion: l1
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_rn50_UNet.yml b/experiments/baselines/2D_rn50_UNet.yml
deleted file mode 100644
index fa3d0fe..0000000
--- a/experiments/baselines/2D_rn50_UNet.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  segmentation: True
-  suffix: '' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [32, 32]
-  crossmodality: False
-  rgb: True
-  # Model
-  architecture: encoderdecoder
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: bce_combined
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  weight: 'gaussian'
-  step: 2
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet50
-  decoder: UNet
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_rn50_UNet_cm.yml b/experiments/baselines/2D_rn50_UNet_cm.yml
deleted file mode 100644
index 04ae194..0000000
--- a/experiments/baselines/2D_rn50_UNet_cm.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  segmentation: True
-  suffix: '' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [32, 32]
-  crossmodality: True
-  rgb: True
-  # Model
-  architecture: encoderdecoder
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: bce_combined
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  weight: 'gaussian'
-  step: 2
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet50
-  decoder: UNet
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_rn50_fpn.yml b/experiments/baselines/2D_rn50_fpn.yml
deleted file mode 100644
index 1217fce..0000000
--- a/experiments/baselines/2D_rn50_fpn.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  segmentation: True
-  suffix: '' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [32, 32]
-  crossmodality: False
-  rgb: True
-  # Model
-  architecture: encoderdecoder
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: bce_combined
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet50
-  decoder: FPN
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/2D_rn50_fpn_cm.yml b/experiments/baselines/2D_rn50_fpn_cm.yml
deleted file mode 100644
index 2ede534..0000000
--- a/experiments/baselines/2D_rn50_fpn_cm.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  segmentation: True
-  suffix: '' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [32, 32]
-  crossmodality: True
-  rgb: True
-  # Model
-  architecture: encoderdecoder
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 64
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: bce_combined
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet50
-  decoder: FPN
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_mse_tv_1176.yml b/experiments/baselines/3D_mse_tv_1176.yml
deleted file mode 100644
index bb82e18..0000000
--- a/experiments/baselines/3D_mse_tv_1176.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: mse_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_mse_tv_1176_HR.yml b/experiments/baselines/3D_mse_tv_1176_HR.yml
deleted file mode 100644
index bb82e18..0000000
--- a/experiments/baselines/3D_mse_tv_1176_HR.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: mse_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_1176_HR.yml b/experiments/baselines/3D_perceptualnet_1176_HR.yml
deleted file mode 100644
index d60347a..0000000
--- a/experiments/baselines/3D_perceptualnet_1176_HR.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_1176_HR' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176_HR
-  autoencoder_layers: False
-  pretrain: False
-  existing_model: 2021_02_25_05_51_33_1_3D_perceptualnet_ds_mse_tv
-  magnification: 4
-  n_blocks: 15
-  preceptual_criterion: l1
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_1176_HR_ds_ssim.yml b/experiments/baselines/3D_perceptualnet_1176_HR_ds_ssim.yml
deleted file mode 100644
index 7558e60..0000000
--- a/experiments/baselines/3D_perceptualnet_1176_HR_ds_ssim.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_1176_HR' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 16
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: ssim
-  window: 11
-  zero_pad: False
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_cm_mse_tv.yml b/experiments/baselines/3D_perceptualnet_cm_mse_tv.yml
deleted file mode 100644
index 023a537..0000000
--- a/experiments/baselines/3D_perceptualnet_cm_mse_tv.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: True
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: mse_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_cm_ssim.yml b/experiments/baselines/3D_perceptualnet_cm_ssim.yml
deleted file mode 100644
index 5ed11a2..0000000
--- a/experiments/baselines/3D_perceptualnet_cm_ssim.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: True
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: ssim
-  window: 7
-  zero_pad: False
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_ds.yml b/experiments/baselines/3D_perceptualnet_ds.yml
deleted file mode 100644
index 2168c02..0000000
--- a/experiments/baselines/3D_perceptualnet_ds.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  zero_pad: True
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_ds_mse_tv.yml b/experiments/baselines/3D_perceptualnet_ds_mse_tv.yml
deleted file mode 100644
index 3e388ed..0000000
--- a/experiments/baselines/3D_perceptualnet_ds_mse_tv.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: mse_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_ds_repeat.yml b/experiments/baselines/3D_perceptualnet_ds_repeat.yml
deleted file mode 100644
index 335cc40..0000000
--- a/experiments/baselines/3D_perceptualnet_ds_repeat.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  zero_pad: False
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/baselines/3D_perceptualnet_ds_ssim.yml b/experiments/baselines/3D_perceptualnet_ds_ssim.yml
deleted file mode 100644
index 22f3a38..0000000
--- a/experiments/baselines/3D_perceptualnet_ds_ssim.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 32
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: ssim
-  window: 7
-  zero_pad: False
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: True
-  step: 2
-  weight: 'gaussian'
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/2D_perceptualnet_ds_16.yml b/experiments/convnet/2D_perceptualnet_ds_16.yml
deleted file mode 100644
index 9ac173b..0000000
--- a/experiments/convnet/2D_perceptualnet_ds_16.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: True
-  suffix: '' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 80
-  bs: 36
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: False # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 10
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/3D_perceptualnet.yml b/experiments/convnet/3D_perceptualnet.yml
deleted file mode 100644
index 4a7b799..0000000
--- a/experiments/convnet/3D_perceptualnet.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: True
-  suffix: '_mag4' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 4
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_layers
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 30
-  rotation: [-10, 10]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [1, 5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [5, 15]
-  contrast: 0.2
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/3D_perceptualnet_ds.yml b/experiments/convnet/3D_perceptualnet_ds.yml
deleted file mode 100644
index 275149c..0000000
--- a/experiments/convnet/3D_perceptualnet_ds.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: True
-  suffix: '_mag4' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 4
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_layers
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 30
-  rotation: [-10, 10]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [1, 5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [5, 15]
-  contrast: 0.2
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/3D_perceptualnet_ds_20.yml b/experiments/convnet/3D_perceptualnet_ds_20.yml
deleted file mode 100644
index c3000b3..0000000
--- a/experiments/convnet/3D_perceptualnet_ds_20.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: True
-  suffix: '_mag4' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [20, 20, 20]
-  crossmodality: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 80
-  bs: 4
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_layers
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 30
-  rotation: [-10, 10]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/3D_perceptualnet_ds_autoencoder_16.yml b/experiments/convnet/3D_perceptualnet_ds_autoencoder_16.yml
deleted file mode 100644
index e8c7c3e..0000000
--- a/experiments/convnet/3D_perceptualnet_ds_autoencoder_16.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  calc_meanstd: False
-  suffix: '_3d_duplicates' # Options: _curated _reduced
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: False
-  rgb: False
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  autoencoder_pretrained: 2021_01_06_10_36_57_2D_autoencoder_ds_16
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 80
-  bs: 2
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: autoencoder_tv
-  train_loss: True
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 5
-  rotation: [-30, 30]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [0.4, 1.5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [-50, 50]
-  contrast: 0.4
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/3D_perceptualnet_pretrained.yml b/experiments/convnet/3D_perceptualnet_pretrained.yml
deleted file mode 100644
index 0d1de94..0000000
--- a/experiments/convnet/3D_perceptualnet_pretrained.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: True
-  suffix: '_3d' # Options: _curated _reduced _mag4
-  parse_color: False
-  crop_small: [16, 16, 16]
-  crossmodality: True
-  # Model
-  architecture: perceptualnet
-  upscale_input: True # In PerceptualNet = resize-convolution
-  add_residual: False
-  activation: relu
-  normalization: in
-  pretrain: True
-  existing_model: dios-erc-gpu_2020_11_04_15_58_20_3D_perceptualnet_ds
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 4
-  loss: combined_layers
-  log_jaccard: True
-  imagenet_normalize_loss: False
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 30
-  rotation: [-10, 10]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [1, 5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [5, 15]
-  contrast: 0.2
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/convnet_leakyrelu.yml b/experiments/convnet/convnet_leakyrelu.yml
deleted file mode 100644
index cdabec5..0000000
--- a/experiments/convnet/convnet_leakyrelu.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: False
-  reduce_data: False
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  # Model
-  architecture: convnet
-  upscale_input: False
-  add_residual: False
-  activation: leakyrelu
-  normalization: bn
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4
-  magnification: 4
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 24
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_layers
-  log_jaccard: True
-  unnormalize_loss: True
-  imagenet_normalize_loss: True
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 30
-  rotation: [-10, 10]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [1, 5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [5, 15]
-  contrast: 0.2
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/convnet_mag2.yml b/experiments/convnet/convnet_mag2.yml
deleted file mode 100644
index bff2b79..0000000
--- a/experiments/convnet/convnet_mag2.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: False
-  reduce_data: False
-  parse_color: False
-  crop_small: [32, 32]
-  crossmodality: False
-  # Model
-  architecture: convnet
-  upscale_input: False
-  add_residual: False
-  activation: relu
-  pretrain: False
-  existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4
-  magnification: 2
-  n_blocks: 15
-  # Training
-  wd: 0.0001
-  lr: 0.0001
-  n_folds: 4
-  epochs: 100
-  bs: 24
-  # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined"
-  loss: combined_layers
-  log_jaccard: True
-  unnormalize_loss: True
-  imagenet_normalize_loss: True
-  gram: True # Calculate Gram matrix on Perceptual loss
-  # LR reduction
-  use_LR_red: True
-  patience: 5
-  factor: 0.1
-  eps: 1e-7
-transforms:
-  probability: 0.5
-  scale: [0.9, 1.1]
-  translation: 30
-  rotation: [-10, 10]
-  shear: [-0.1, 0.1]
-  gamma: [0.7, 1.5]
-  sigma: [1, 5]
-  hsv: [0, 50]
-  gain_sp: 0.1
-  gain_gn: 0.5
-  brightness: [5, 15]
-  contrast: 0.2
-  v_range: [0.000001, 0.0009]
-inference:
-  calc_inference: False
-  threshold: 0.8
-model:
-  decoder_normalization: IN
-  n_outputs: 1
-  spatial_dropout: 0.1
-  bayesian_dropout: 0.5
-  backbone: resnet34
-  decoder: enhance
-data_sampling:
-  train:
-    data_provider:
-      SR:
-        loader_train:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
-  eval:
-    data_provider:
-      SR:
-        loader_eval:
-          cate: ItemLoader
-          batches_per_iter: 1
-          data_key: "data"
-          target_key: "target"
\ No newline at end of file
diff --git a/experiments/convnet/convnet_nobn_noscaling.yml b/experiments/convnet/convnet_nobn_noscaling.yml
deleted file mode 100644
index e5c563b..0000000
--- a/experiments/convnet/convnet_nobn_noscaling.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-strategy:
-  # Support for 'train' or 'eval' stages only
-  stage_names:
-    - train
-    - eval
-  accumulate_grad:
-    SR: False
-  accumulate_grad_in_iter:
-    SR: False
-  train_starts_at_epoch:
-    SR: 0
-training:
-  # General
-  experiment: 3D
-  calc_meanstd: False
-  reduce_data: False
-  parse_color: False
-  crop_small: [16, 16]
-  crossmodality: False
-  # Model
-  architecture: convnet
-  upscale_input: False
-  add_residual: False
-  activation: 
leakyrelu - normalization: None - pretrain: False - existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_layers - log_jaccard: True - unnormalize_loss: False - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/convnet/convnet_nobn_standard.yml b/experiments/convnet/convnet_nobn_standard.yml deleted file mode 100644 index bd769a2..0000000 --- a/experiments/convnet/convnet_nobn_standard.yml +++ /dev/null @@ -1,87 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [16, 16] - crossmodality: False - # Model - architecture: convnet - upscale_input: False - add_residual: False - activation: relu - normalization: None - pretrain: False - existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 10 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_layers - log_jaccard: True - unnormalize_loss: False - imagenet_normalize_loss: True - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/convnet/convnet_noscaling.yml b/experiments/convnet/convnet_noscaling.yml deleted file mode 100644 index 660708f..0000000 --- a/experiments/convnet/convnet_noscaling.yml +++ /dev/null @@ -1,87 +0,0 
@@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [16, 16] - crossmodality: False - # Model - architecture: convnet - upscale_input: False - add_residual: False - activation: leakyrelu - normalization: bn - pretrain: False - existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_layers - log_jaccard: True - unnormalize_loss: False - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/convnet/convnet_residual.yml b/experiments/convnet/convnet_residual.yml deleted file mode 100644 index ac22f9a..0000000 --- a/experiments/convnet/convnet_residual.yml +++ /dev/null @@ -1,86 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [32, 32] - crossmodality: False - # Model - architecture: convnet - upscale_input: True - add_residual: True - activation: relu - pretrain: False - existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_layers - log_jaccard: True - unnormalize_loss: True - imagenet_normalize_loss: True - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - 
target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/convnet/convnet_standard.yml b/experiments/convnet/convnet_standard.yml deleted file mode 100644 index a97460c..0000000 --- a/experiments/convnet/convnet_standard.yml +++ /dev/null @@ -1,87 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [16, 16] - crossmodality: False - # Model - architecture: convnet - upscale_input: False - add_residual: False - activation: relu - normalization: bn - pretrain: False - existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 10 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_layers - log_jaccard: True - unnormalize_loss: False - imagenet_normalize_loss: True - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/convnet/convnet_upscale.yml b/experiments/convnet/convnet_upscale.yml deleted file mode 100644 index 48afba1..0000000 --- a/experiments/convnet/convnet_upscale.yml +++ /dev/null @@ -1,86 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [32, 32] - crossmodality: False - # Model - architecture: convnet - upscale_input: True - add_residual: False - activation: relu - pretrain: False - existing_model: dios-erc-gpu_2020_08_13_15_44_23_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_layers - log_jaccard: True - unnormalize_loss: True - imagenet_normalize_loss: True - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 
- brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/2D_perceptualnet_cm.yml b/experiments/extra/2D_perceptualnet_cm.yml deleted file mode 100644 index c4150f8..0000000 --- a/experiments/extra/2D_perceptualnet_cm.yml +++ /dev/null @@ -1,92 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: True - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176_HR - autoencoder_layers: False - pretrain: False - existing_model: 2021_02_25_05_51_33_1_3D_perceptualnet_ds_mse_tv - magnification: 4 - n_blocks: 15 - preceptual_criterion: l1 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_deep_autoencoder_ds_1176.yml b/experiments/extra/3D_deep_autoencoder_ds_1176.yml deleted file mode 100644 index 2883c56..0000000 --- a/experiments/extra/3D_deep_autoencoder_ds_1176.yml +++ /dev/null @@ -1,90 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - pretrain: False - 
existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: l1 - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance - autoencoder: deep -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_deep_autoencoder_ds_1176_HR.yml b/experiments/extra/3D_deep_autoencoder_ds_1176_HR.yml deleted file mode 100644 index 063765f..0000000 --- a/experiments/extra/3D_deep_autoencoder_ds_1176_HR.yml +++ /dev/null @@ -1,90 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: l1 - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance - autoencoder: deep -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_deep_autoencoder_ds_16.yml b/experiments/extra/3D_deep_autoencoder_ds_16.yml deleted file 
mode 100644 index 700e8ca..0000000 --- a/experiments/extra/3D_deep_autoencoder_ds_16.yml +++ /dev/null @@ -1,90 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 8 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: l1 - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance - autoencoder: deep -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_mse_tv_1176.yml b/experiments/extra/3D_mse_tv_1176.yml deleted file mode 100644 index fd76560..0000000 --- a/experiments/extra/3D_mse_tv_1176.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 8 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - 
brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_1176_HR.yml b/experiments/extra/3D_perceptualnet_1176_HR.yml deleted file mode 100644 index 0836012..0000000 --- a/experiments/extra/3D_perceptualnet_1176_HR.yml +++ /dev/null @@ -1,92 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176_HR - autoencoder_layers: False - pretrain: True - existing_model: 2021_02_25_05_51_33_1_3D_perceptualnet_ds_mse_tv - magnification: 4 - n_blocks: 15 - preceptual_criterion: l1 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_cm_autoencoder_encoderlayers.yml b/experiments/extra/3D_perceptualnet_cm_autoencoder_encoderlayers.yml deleted file mode 100644 index d03a626..0000000 --- a/experiments/extra/3D_perceptualnet_cm_autoencoder_encoderlayers.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: True - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In 
PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_23_05_48_32_3D_deep_autoencoder_ds_16 - autoencoder_layers: True - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_cm_autoencoder_fullpass.yml b/experiments/extra/3D_perceptualnet_cm_autoencoder_fullpass.yml deleted file mode 100644 index e203da6..0000000 --- a/experiments/extra/3D_perceptualnet_cm_autoencoder_fullpass.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: True - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_23_05_48_32_3D_deep_autoencoder_ds_16 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - 
batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_cm_mse_tv.yml b/experiments/extra/3D_perceptualnet_cm_mse_tv.yml deleted file mode 100644 index 78b8050..0000000 --- a/experiments/extra/3D_perceptualnet_cm_mse_tv.yml +++ /dev/null @@ -1,90 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: True - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 12 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_cm_perceptual_pretrained.yml b/experiments/extra/3D_perceptualnet_cm_perceptual_pretrained.yml deleted file mode 100644 index 4a928d4..0000000 --- a/experiments/extra/3D_perceptualnet_cm_perceptual_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: True - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_23_05_48_32_3D_deep_autoencoder_ds_16 - pretrain: True - existing_model: 2021_02_25_05_51_33_1_3D_perceptualnet_ds_mse_tv - magnification: 4 - n_blocks: 15 - preceptual_criterion: l1 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", 
"bce", "jaccard", "perceptual", "l1" and "combined" - loss: perceptual_layers - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_ds_autoencoder_16.yml b/experiments/extra/3D_perceptualnet_ds_autoencoder_16.yml deleted file mode 100644 index 2d45939..0000000 --- a/experiments/extra/3D_perceptualnet_ds_autoencoder_16.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_ds_autoencoder_encoderlayers.yml b/experiments/extra/3D_perceptualnet_ds_autoencoder_encoderlayers.yml deleted file mode 100644 index a99ae7b..0000000 --- 
a/experiments/extra/3D_perceptualnet_ds_autoencoder_encoderlayers.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_23_05_48_32_3D_deep_autoencoder_ds_16 - autoencoder_layers: True - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_ds_autoencoder_fullpass.yml b/experiments/extra/3D_perceptualnet_ds_autoencoder_fullpass.yml deleted file mode 100644 index 465bdf4..0000000 --- a/experiments/extra/3D_perceptualnet_ds_autoencoder_fullpass.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_23_05_48_32_3D_deep_autoencoder_ds_16 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - 
scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_ds_autoencoder_layers.yml b/experiments/extra/3D_perceptualnet_ds_autoencoder_layers.yml deleted file mode 100644 index ca9f629..0000000 --- a/experiments/extra/3D_perceptualnet_ds_autoencoder_layers.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_03_02_13_44_12_3D_deep_autoencoder_ds_16 - autoencoder_layers: True - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_ds_mse_tv.yml b/experiments/extra/3D_perceptualnet_ds_mse_tv.yml deleted file mode 100644 index 0116103..0000000 --- a/experiments/extra/3D_perceptualnet_ds_mse_tv.yml +++ /dev/null @@ -1,90 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - suffix: '_3d_duplicates' # Options: _curated _reduced - 
parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_perceptualnet_ds_perceptual_pretrained.yml b/experiments/extra/3D_perceptualnet_ds_perceptual_pretrained.yml deleted file mode 100644 index 7a5ace6..0000000 --- a/experiments/extra/3D_perceptualnet_ds_perceptual_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: True - existing_model: 2021_02_21_11_12_11_3D_perceptualnet_ds_mse_tv - magnification: 4 - n_blocks: 15 - preceptual_criterion: l1 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: perceptual_layers - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: 
enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/3D_shallow_autoencoder_ds_16.yml b/experiments/extra/3D_shallow_autoencoder_ds_16.yml deleted file mode 100644 index 0910de6..0000000 --- a/experiments/extra/3D_shallow_autoencoder_ds_16.yml +++ /dev/null @@ -1,90 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 18 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - weight: 'gaussian' - step: 2 - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance - autoencoder: shallow -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_MSE_enhancenet.yml b/experiments/extra/config_MSE_enhancenet.yml deleted file mode 100644 index 34e9ae6..0000000 --- a/experiments/extra/config_MSE_enhancenet.yml +++ /dev/null @@ -1,76 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [32, 32] - architecture: 'enhance' - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: mse - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 
0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_MSE_fpn.yml b/experiments/extra/config_MSE_fpn.yml deleted file mode 100644 index a30dc18..0000000 --- a/experiments/extra/config_MSE_fpn.yml +++ /dev/null @@ -1,76 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [32, 32] - architecture: 'encoderdecoder' - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: mse - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_PSNR_enhancenet.yml b/experiments/extra/config_PSNR_enhancenet.yml deleted file mode 100644 index dac3acd..0000000 --- a/experiments/extra/config_PSNR_enhancenet.yml +++ /dev/null @@ -1,76 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [32, 32] - architecture: 'enhance' - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: psnr - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - 
loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_PSNR_fpn.yml b/experiments/extra/config_PSNR_fpn.yml deleted file mode 100644 index 6f4041d..0000000 --- a/experiments/extra/config_PSNR_fpn.yml +++ /dev/null @@ -1,76 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [32, 32] - architecture: 'encoderdecoder' - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: psnr - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_combined_enhancenet_0001.yml b/experiments/extra/config_combined_enhancenet_0001.yml deleted file mode 100644 index 4ba5a5e..0000000 --- a/experiments/extra/config_combined_enhancenet_0001.yml +++ /dev/null @@ -1,79 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [64, 64] - architecture: enhance - crossmodality: False - pretrain: False - existing_model: dios-erc-gpu_2020_07_17_08_22_29_enhance_combined - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: combined - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - 
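Every config in this diff carries the same LR-reduction block (`use_LR_red: True`, `patience: 5`, `factor: 0.1`, `eps: 1e-7`). Those keys map one-to-one onto PyTorch's ReduceLROnPlateau; wiring them up this way is an assumption about how the flag is realized, and the model and validation loss below are stand-ins:

```python
import torch

model = torch.nn.Conv2d(1, 1, 3, padding=1)  # stand-in for the SR network
opt = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)  # lr/wd keys
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
    opt, mode="min", factor=0.1, patience=5, eps=1e-7)

for epoch in range(100):           # epochs: 100
    val_loss = 1.0 / (epoch + 1)   # placeholder for the real validation loss
    sched.step(val_loss)           # decays lr by `factor` after `patience` flat epochs
```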
loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_enhancenet_high_lr.yml b/experiments/extra/config_enhancenet_high_lr.yml deleted file mode 100644 index 44627da..0000000 --- a/experiments/extra/config_enhancenet_high_lr.yml +++ /dev/null @@ -1,79 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [64, 64] - architecture: enhance - crossmodality: False - pretrain: False - existing_model: dios-erc-gpu_2020_07_17_08_22_29_enhance_combined - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: combined - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_l1_enhancenet_0001.yml b/experiments/extra/config_l1_enhancenet_0001.yml deleted file mode 100644 index af80631..0000000 --- a/experiments/extra/config_l1_enhancenet_0001.yml +++ /dev/null @@ -1,79 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [64, 64] - architecture: enhance - crossmodality: False - pretrain: False - existing_model: dios-erc-gpu_2020_07_17_08_22_29_enhance_combined - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: 'L1' - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: 
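The `loss:` key takes free-form strings: the files above use `mse`, `psnr`, `combined`, and the quoted, capitalized `'L1'`. A case-insensitive factory is the natural dispatch; this sketch covers only the elementary criteria, since what `combined` and the `*_tv` variants mix is repository-specific:

```python
import torch.nn as nn

def make_criterion(name: str) -> nn.Module:
    """Map a YAML `loss:` string to a criterion.

    Lower-casing absorbs spellings like 'L1'. Composite losses
    ("combined", "perceptual", "*_tv") are repo-specific and omitted here.
    """
    simple = {"mse": nn.MSELoss, "l1": nn.L1Loss, "bce": nn.BCEWithLogitsLoss}
    key = name.lower()
    if key in simple:
        return simple[key]()
    raise ValueError(f"composite or unknown loss: {name}")
```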
ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_perceptual_enhancenet.yml b/experiments/extra/config_perceptual_enhancenet.yml deleted file mode 100644 index a074af3..0000000 --- a/experiments/extra/config_perceptual_enhancenet.yml +++ /dev/null @@ -1,77 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [64, 64] - architecture: enhance - crossmodality: False - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 12 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: perceptual - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/config_perceptual_enhancenet_0001.yml b/experiments/extra/config_perceptual_enhancenet_0001.yml deleted file mode 100644 index a15f09e..0000000 --- a/experiments/extra/config_perceptual_enhancenet_0001.yml +++ /dev/null @@ -1,79 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - experiment: 3D - calc_meanstd: True - reduce_data: False - parse_color: False - crop_small: [64, 64] - architecture: enhance - crossmodality: False - pretrain: False - existing_model: dios-erc-gpu_2020_07_17_08_22_29_enhance_combined - #crop_large: [128, 128] - magnification: 4 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: perceptual - log_jaccard: true - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: 
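The two config_perceptual_* files select `loss: perceptual` together with `gram: True # Calculate Gram matrix on Perceptual loss`, which reads as a VGG feature loss augmented with a style-transfer-style Gram term. A minimal sketch with torchvision's VGG16; the tap point (relu2_2) and the equal weighting of the two terms are assumptions:

```python
import torch
import torch.nn.functional as F
from torchvision.models import vgg16

def gram(feat: torch.Tensor) -> torch.Tensor:
    # Channel-by-channel correlation of the feature map, size-normalized.
    b, c, h, w = feat.shape
    f = feat.reshape(b, c, h * w)
    return f @ f.transpose(1, 2) / (c * h * w)

class PerceptualGramLoss(torch.nn.Module):
    def __init__(self, layer: int = 9):  # features[:9] ends at relu2_2 (assumed tap)
        super().__init__()
        self.vgg = vgg16(weights="DEFAULT").features[:layer].eval()
        for p in self.vgg.parameters():
            p.requires_grad_(False)

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # VGG expects 3 channels; grayscale inputs (rgb: False) would need
        # replication before this call.
        fp, ft = self.vgg(pred), self.vgg(target)
        return F.mse_loss(fp, ft) + F.mse_loss(gram(fp), gram(ft))
```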
"target" \ No newline at end of file diff --git a/experiments/extra/rn18_UNet.yml b/experiments/extra/rn18_UNet.yml deleted file mode 100644 index 3c3d82f..0000000 --- a/experiments/extra/rn18_UNet.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: True - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet18 - decoder: UNet -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/rn18_fpn.yml b/experiments/extra/rn18_fpn.yml deleted file mode 100644 index 997f8b5..0000000 --- a/experiments/extra/rn18_fpn.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: True - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - 
shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet18 - decoder: FPN -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/rn34_UNet.yml b/experiments/extra/rn34_UNet.yml deleted file mode 100644 index ceba9fc..0000000 --- a/experiments/extra/rn34_UNet.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: UNet -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/rn34_fpn.yml b/experiments/extra/rn34_fpn.yml deleted file mode 100644 index 6dd804c..0000000 --- a/experiments/extra/rn34_fpn.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 
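All of these models share `upscale_input: True # In PerceptualNet = resize-convolution` with `magnification: 4`. Resize-convolution (interpolate, then convolve) is the standard alternative to transposed convolution for avoiding checkerboard artifacts; a generic block illustrating the idea, not the actual PerceptualNet layout:

```python
import torch.nn as nn

class ResizeConv2d(nn.Module):
    """Upsample-then-convolve block (resize-convolution)."""

    def __init__(self, in_ch: int, out_ch: int, scale: int = 4):
        super().__init__()
        # Interpolation mode is a free choice; bilinear shown here.
        self.up = nn.Upsample(scale_factor=scale, mode="bilinear")
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(self.up(x))
```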
2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: FPN -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/rn50_UNet_mse_tv.yml b/experiments/extra/rn50_UNet_mse_tv.yml deleted file mode 100644 index 660c7b0..0000000 --- a/experiments/extra/rn50_UNet_mse_tv.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: UNet -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/rn50_fpn.yml b/experiments/extra/rn50_fpn.yml deleted file mode 100644 index 
b3c10f1..0000000 --- a/experiments/extra/rn50_fpn.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: FPN -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/extra/rn50_fpn_mse_tv.yml b/experiments/extra/rn50_fpn_mse_tv.yml deleted file mode 100644 index b3c10f1..0000000 --- a/experiments/extra/rn50_fpn_mse_tv.yml +++ /dev/null @@ -1,89 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 
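`loss: mse_tv`, used by the rn50 configs here and much of the IVD set below, pairs an MSE fidelity term with total-variation smoothing of the prediction. A textbook sketch; the TV weight is an assumption, as the YAML does not expose it:

```python
import torch

def tv_loss(x: torch.Tensor) -> torch.Tensor:
    """Anisotropic total variation of an NCHW batch."""
    dh = (x[..., 1:, :] - x[..., :-1, :]).abs().mean()
    dw = (x[..., :, 1:] - x[..., :, :-1]).abs().mean()
    return dh + dw

def mse_tv(pred, target, tv_weight: float = 1e-4):
    # tv_weight is an assumption; the YAML exposes no such knob.
    return torch.mean((pred - target) ** 2) + tv_weight * tv_loss(pred)
```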
- v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: FPN -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_mse_tv_IVD_4x.yml b/experiments/ivd/2D_mse_tv_IVD_4x.yml deleted file mode 100644 index bd77b55..0000000 --- a/experiments/ivd/2D_mse_tv_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_mse_tv_IVD_8x.yml b/experiments/ivd/2D_mse_tv_IVD_8x.yml deleted file mode 100644 index ac637dd..0000000 --- a/experiments/ivd/2D_mse_tv_IVD_8x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_FR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 
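The IVD configs turn on `calc_inference: True` with `step: 3` and `weight: 'gaussian'`, i.e. sliding-window inference where overlapping tile predictions are blended under a Gaussian weight map. A 2-D sketch; the exact stride semantics of `step` and the window width are assumptions, and border handling is omitted:

```python
import numpy as np

def gaussian_window(size: int, sigma_frac: float = 0.125) -> np.ndarray:
    """Separable 2-D Gaussian weight map for blending overlapping tiles."""
    ax = np.arange(size) - (size - 1) / 2.0
    g = np.exp(-(ax ** 2) / (2 * (sigma_frac * size) ** 2))
    return np.outer(g, g)

def tiled_predict(image, predict, tile=64, step=3):
    """Blend overlapping tile predictions; stride = tile // step (assumed)."""
    out = np.zeros_like(image, dtype=np.float64)
    norm = np.zeros_like(out)
    w = gaussian_window(tile)
    stride = tile // step
    for y in range(0, image.shape[0] - tile + 1, stride):
        for x in range(0, image.shape[1] - tile + 1, stride):
            out[y:y + tile, x:x + tile] += w * predict(image[y:y + tile, x:x + tile])
            norm[y:y + tile, x:x + tile] += w
    return out / np.maximum(norm, 1e-8)
```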
dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_perceptual_tv_IVD_4x.yml b/experiments/ivd/2D_perceptual_tv_IVD_4x.yml deleted file mode 100644 index 639f021..0000000 --- a/experiments/ivd/2D_perceptual_tv_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_perceptual_tv_IVD_4x_pretrained.yml 
b/experiments/ivd/2D_perceptual_tv_IVD_4x_pretrained.yml deleted file mode 100644 index 4bbeecd..0000000 --- a/experiments/ivd/2D_perceptual_tv_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_05_20_15_04_07_2D_perceptual_tv_1176_HR_fulldata_seed42 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml b/experiments/ivd/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index caec929..0000000 --- a/experiments/ivd/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_10_12_34_40_2D_perceptual_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # 
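The *_isotropic configs add `antialiasing: 9` and `sigma: 1`, which reads as a 9-tap Gaussian low-pass applied before decimation when synthesizing low-resolution inputs. A sketch of that reading with SciPy; SciPy sizes its kernel via `truncate`, so the 9-tap width is approximated as truncate = 4·sigma:

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def downsample_antialiased(image: np.ndarray, factor: int = 4,
                           sigma: float = 1.0) -> np.ndarray:
    """Gaussian low-pass, then decimation (2-D; 3-D is analogous).

    truncate=4.0 with sigma=1 gives a radius-4, i.e. 9-tap, kernel —
    our assumed reading of `antialiasing: 9` / `sigma: 1`.
    """
    smoothed = gaussian_filter(image, sigma=sigma, truncate=4.0)
    return smoothed[::factor, ::factor]
```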
LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_perceptual_tv_IVD_8x.yml b/experiments/ivd/2D_perceptual_tv_IVD_8x.yml deleted file mode 100644 index fa84f8a..0000000 --- a/experiments/ivd/2D_perceptual_tv_IVD_8x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_FR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_ssim_IVD_4x.yml b/experiments/ivd/2D_ssim_IVD_4x.yml deleted file mode 100644 index 1b100b8..0000000 --- a/experiments/ivd/2D_ssim_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD' # Options: _curated _reduced - parse_color: False 
- crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/2D_ssim_IVD_8x.yml b/experiments/ivd/2D_ssim_IVD_8x.yml deleted file mode 100644 index 83322d9..0000000 --- a/experiments/ivd/2D_ssim_IVD_8x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_FR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: 
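The 2D_ssim_* files train directly against structural similarity (`loss: ssim`), conventionally minimized as 1 − SSIM. A compact single-scale implementation following Wang et al. (2004); window size, sigma, and the single-scale choice are assumptions about the repository's variant:

```python
import torch
import torch.nn.functional as F

def ssim_loss(x: torch.Tensor, y: torch.Tensor, window: int = 11,
              sigma: float = 1.5, max_val: float = 1.0) -> torch.Tensor:
    """1 - single-scale SSIM for NCHW tensors in [0, max_val]."""
    c = x.shape[1]
    ax = torch.arange(window, dtype=x.dtype, device=x.device) - window // 2
    g = torch.exp(-(ax ** 2) / (2 * sigma ** 2))
    g = g / g.sum()
    win = (g[:, None] * g[None, :]).expand(c, 1, window, window).contiguous()
    pad = window // 2
    conv = lambda t: F.conv2d(t, win, padding=pad, groups=c)
    mx, my = conv(x), conv(y)
    sxx, syy = conv(x * x) - mx ** 2, conv(y * y) - my ** 2
    sxy = conv(x * y) - mx * my
    c1, c2 = (0.01 * max_val) ** 2, (0.03 * max_val) ** 2
    ssim = ((2 * mx * my + c1) * (2 * sxy + c2)) / \
           ((mx ** 2 + my ** 2 + c1) * (sxx + syy + c2))
    return 1.0 - ssim.mean()
```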
ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml b/experiments/ivd/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index b346a1c..0000000 --- a/experiments/ivd/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_05_05_11_05_36_3D_mse_tv_1176_seed10 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_mse_tv_IVD_4x.yml b/experiments/ivd_new/2D_mse_tv_IVD_4x.yml deleted file mode 100644 index a335288..0000000 --- a/experiments/ivd_new/2D_mse_tv_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - 
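From here the ivd/ivd_new configs increasingly set `pretrain: True` and point `existing_model` at an earlier run ID (a 1176-dataset model being fine-tuned on IVD data). A hedged sketch of the warm-start step; the snapshot directory layout and file name are hypothetical:

```python
import torch
import torch.nn as nn

def load_pretrained(model: nn.Module, run_id: str,
                    snapshots_dir: str = "workdir/snapshots") -> nn.Module:
    """Warm-start from an earlier run named by the YAML `existing_model` key.

    The directory layout and "model_best.pth" file name are assumptions;
    how a run ID maps to a checkpoint is repository-specific.
    """
    ckpt = torch.load(f"{snapshots_dir}/{run_id}/model_best.pth",
                      map_location="cpu")
    # strict=False lets a fine-tuning target with a different head load the
    # overlapping weights and skip the rest.
    model.load_state_dict(ckpt, strict=False)
    return model
```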
bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_mse_tv_IVD_4x_pretrained.yml b/experiments/ivd_new/2D_mse_tv_IVD_4x_pretrained.yml deleted file mode 100644 index fa57144..0000000 --- a/experiments/ivd_new/2D_mse_tv_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_02_57_26_2D_mse_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_mse_tv_IVD_4x_pretrained_isotropic.yml b/experiments/ivd_new/2D_mse_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index 07adbd1..0000000 --- 
a/experiments/ivd_new/2D_mse_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_02_57_26_2D_mse_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_perceptual_tv_IVD_4x.yml b/experiments/ivd_new/2D_perceptual_tv_IVD_4x.yml deleted file mode 100644 index 8eb9fd4..0000000 --- a/experiments/ivd_new/2D_perceptual_tv_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_05_20_15_04_07_2D_perceptual_tv_1176_HR_fulldata_seed42 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 
0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_perceptual_tv_IVD_4x_pretrained.yml b/experiments/ivd_new/2D_perceptual_tv_IVD_4x_pretrained.yml deleted file mode 100644 index caac026..0000000 --- a/experiments/ivd_new/2D_perceptual_tv_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_04_09_01_2D_perceptual_tv_1176_seed50 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml b/experiments/ivd_new/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index d0dfc1d..0000000 --- a/experiments/ivd_new/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 
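The transforms block (probability, scale, rotation, brightness, contrast, sigma, ...) defines per-sample augmentation ranges shared by every experiment here. Purely for illustration, a few of those keys translated into an albumentations-style pipeline; the repository may well use a different augmentation library, and the unit conversions (pixels vs. fractions, 8-bit intensity ranges) are guesses:

```python
import albumentations as A

# Illustrative translation of a few `transforms:` keys; parameter names are
# albumentations', not the YAML's, and every conversion below is assumed.
augment = A.Compose([
    A.ShiftScaleRotate(shift_limit=5 / 256,   # translation: 5 px, assumed fraction
                       scale_limit=0.1,       # scale: [0.9, 1.1]
                       rotate_limit=30,       # rotation: [-30, 30]
                       p=0.5),                # probability: 0.5
    A.RandomBrightnessContrast(brightness_limit=50 / 255,  # brightness: [-50, 50]
                               contrast_limit=0.4,          # contrast: 0.4
                               p=0.5),
    A.GaussianBlur(sigma_limit=(0.4, 1.5), p=0.5),          # sigma: [0.4, 1.5]
])
```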
16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_10_12_34_40_2D_perceptual_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_ssim_IVD_4x.yml b/experiments/ivd_new/2D_ssim_IVD_4x.yml deleted file mode 100644 index 21db0f2..0000000 --- a/experiments/ivd_new/2D_ssim_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - 
loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_ssim_IVD_4x_pretrained.yml b/experiments/ivd_new/2D_ssim_IVD_4x_pretrained.yml deleted file mode 100644 index 4a1c6dd..0000000 --- a/experiments/ivd_new/2D_ssim_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_03_41_08_2D_ssim_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/2D_ssim_IVD_4x_pretrained_isotropic.yml b/experiments/ivd_new/2D_ssim_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index b1dea1c..0000000 --- a/experiments/ivd_new/2D_ssim_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_03_41_08_2D_ssim_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss 
parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_mse_tv_IVD_4x_isotropic.yml b/experiments/ivd_new/3D_mse_tv_IVD_4x_isotropic.yml deleted file mode 100644 index e5ecde0..0000000 --- a/experiments/ivd_new/3D_mse_tv_IVD_4x_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_mse_tv_IVD_4x_pretrained_isotropic.yml b/experiments/ivd_new/3D_mse_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index 01f42c5..0000000 --- 
a/experiments/ivd_new/3D_mse_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_isotropic.yml b/experiments/ivd_new/3D_perceptual_tv_IVD_4x_isotropic.yml deleted file mode 100644 index 243cb02..0000000 --- a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_10_12_31_46_3D_mse_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - 
probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_isotropic_largecrop.yml b/experiments/ivd_new/3D_perceptual_tv_IVD_4x_isotropic_largecrop.yml deleted file mode 100644 index 8fc222e..0000000 --- a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_isotropic_largecrop.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR_largecrop' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_10_12_31_46_3D_mse_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml b/experiments/ivd_new/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index da80e35..0000000 --- a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - 
train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_pretrained_isotropic_largecrop.yml b/experiments/ivd_new/3D_perceptual_tv_IVD_4x_pretrained_isotropic_largecrop.yml deleted file mode 100644 index 5c886b8..0000000 --- a/experiments/ivd_new/3D_perceptual_tv_IVD_4x_pretrained_isotropic_largecrop.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR_largecrop' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - 
gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_ssim_IVD_4x_isotropic.yml b/experiments/ivd_new/3D_ssim_IVD_4x_isotropic.yml deleted file mode 100644 index 903fc88..0000000 --- a/experiments/ivd_new/3D_ssim_IVD_4x_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/ivd_new/3D_ssim_IVD_4x_pretrained_isotropic.yml b/experiments/ivd_new/3D_ssim_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index 242e27a..0000000 --- a/experiments/ivd_new/3D_ssim_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: 
perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_mse_tv_1176.yml b/experiments/run/2D_mse_tv_1176.yml similarity index 100% rename from experiments/wacv_experiments/2D_mse_tv_1176.yml rename to experiments/run/2D_mse_tv_1176.yml diff --git a/experiments/run/2D_mse_tv_IVD_4x.yml b/experiments/run/2D_mse_tv_IVD_4x.yml deleted file mode 100644 index a335288..0000000 --- a/experiments/run/2D_mse_tv_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 
- bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/2D_mse_tv_IVD_4x_pretrained.yml b/experiments/run/2D_mse_tv_IVD_4x_pretrained.yml deleted file mode 100644 index fa57144..0000000 --- a/experiments/run/2D_mse_tv_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_02_57_26_2D_mse_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/2D_mse_tv_IVD_4x_pretrained_isotropic.yml b/experiments/run/2D_mse_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index 07adbd1..0000000 --- a/experiments/run/2D_mse_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_02_57_26_2D_mse_tv_1176_seed20 - 
magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_perceptual_tv_1176.yml b/experiments/run/2D_perceptual_tv_1176.yml similarity index 100% rename from experiments/wacv_experiments/2D_perceptual_tv_1176.yml rename to experiments/run/2D_perceptual_tv_1176.yml diff --git a/experiments/run/2D_perceptual_tv_IVD_4x.yml b/experiments/run/2D_perceptual_tv_IVD_4x.yml deleted file mode 100644 index 8eb9fd4..0000000 --- a/experiments/run/2D_perceptual_tv_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_05_20_15_04_07_2D_perceptual_tv_1176_HR_fulldata_seed42 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - 
cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/2D_perceptual_tv_IVD_4x_pretrained.yml b/experiments/run/2D_perceptual_tv_IVD_4x_pretrained.yml deleted file mode 100644 index caac026..0000000 --- a/experiments/run/2D_perceptual_tv_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_04_09_01_2D_perceptual_tv_1176_seed50 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml b/experiments/run/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index d0dfc1d..0000000 --- a/experiments/run/2D_perceptual_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_10_12_34_40_2D_perceptual_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", 
"perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_ssim_1176.yml b/experiments/run/2D_ssim_1176.yml similarity index 100% rename from experiments/wacv_experiments/2D_ssim_1176.yml rename to experiments/run/2D_ssim_1176.yml diff --git a/experiments/run/2D_ssim_IVD_4x.yml b/experiments/run/2D_ssim_IVD_4x.yml deleted file mode 100644 index 21db0f2..0000000 --- a/experiments/run/2D_ssim_IVD_4x.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/2D_ssim_IVD_4x_pretrained.yml b/experiments/run/2D_ssim_IVD_4x_pretrained.yml deleted file mode 100644 index 
4a1c6dd..0000000 --- a/experiments/run/2D_ssim_IVD_4x_pretrained.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_03_41_08_2D_ssim_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/2D_ssim_IVD_4x_pretrained_isotropic.yml b/experiments/run/2D_ssim_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index b1dea1c..0000000 --- a/experiments/run/2D_ssim_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_2D_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_03_41_08_2D_ssim_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] 
- gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/3D_mse_tv_1176.yml b/experiments/run/3D_mse_tv_1176.yml similarity index 100% rename from experiments/wacv_experiments/3D_mse_tv_1176.yml rename to experiments/run/3D_mse_tv_1176.yml diff --git a/experiments/run/3D_mse_tv_IVD_4x_isotropic.yml b/experiments/run/3D_mse_tv_IVD_4x_isotropic.yml deleted file mode 100644 index 740ba45..0000000 --- a/experiments/run/3D_mse_tv_IVD_4x_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/3D_mse_tv_IVD_4x_pretrained_isotropic.yml b/experiments/run/3D_mse_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index 5f7afde..0000000 --- a/experiments/run/3D_mse_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - 
SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/3D_perceptual_tv_1176.yml b/experiments/run/3D_perceptual_tv_1176.yml similarity index 100% rename from experiments/wacv_experiments/3D_perceptual_tv_1176.yml rename to experiments/run/3D_perceptual_tv_1176.yml diff --git a/experiments/run/3D_perceptual_tv_IVD_4x_isotropic.yml b/experiments/run/3D_perceptual_tv_IVD_4x_isotropic.yml deleted file mode 100644 index dbb513b..0000000 --- a/experiments/run/3D_perceptual_tv_IVD_4x_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_10_12_31_46_3D_mse_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - 
probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/3D_perceptual_tv_IVD_4x_isotropic_largecrop.yml b/experiments/run/3D_perceptual_tv_IVD_4x_isotropic_largecrop.yml deleted file mode 100644 index b4fff48..0000000 --- a/experiments/run/3D_perceptual_tv_IVD_4x_isotropic_largecrop.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR_largecrop' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_10_12_31_46_3D_mse_tv_1176_seed30 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml b/experiments/run/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index a41b3dd..0000000 --- a/experiments/run/3D_perceptual_tv_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 
-training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/3D_perceptual_tv_IVD_4x_pretrained_isotropic_largecrop.yml b/experiments/run/3D_perceptual_tv_IVD_4x_pretrained_isotropic_largecrop.yml deleted file mode 100644 index 5c886b8..0000000 --- a/experiments/run/3D_perceptual_tv_IVD_4x_pretrained_isotropic_largecrop.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_isotropic_3D_HR_largecrop' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - 
contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/3D_ssim_1176.yml b/experiments/run/3D_ssim_1176.yml similarity index 100% rename from experiments/wacv_experiments/3D_ssim_1176.yml rename to experiments/run/3D_ssim_1176.yml diff --git a/experiments/run/3D_ssim_IVD_4x_isotropic.yml b/experiments/run/3D_ssim_IVD_4x_isotropic.yml deleted file mode 100644 index 110b178..0000000 --- a/experiments/run/3D_ssim_IVD_4x_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run/3D_ssim_IVD_4x_pretrained_isotropic.yml b/experiments/run/3D_ssim_IVD_4x_pretrained_isotropic.yml deleted file mode 100644 index b1fcdb1..0000000 --- a/experiments/run/3D_ssim_IVD_4x_pretrained_isotropic.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_IVD_3D_isotropic_HR' # Options: 
_curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: True - existing_model: 2021_06_11_21_42_17_3D_perceptual_tv_1176_seed20 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 16 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run_autoencoder/1_3D_deep_autoencoder_ds_16.yml b/experiments/run_autoencoder/1_3D_deep_autoencoder_ds_16.yml deleted file mode 100644 index 9de1eef..0000000 --- a/experiments/run_autoencoder/1_3D_deep_autoencoder_ds_16.yml +++ /dev/null @@ -1,88 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [8, 8, 8] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 100 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: l1 - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance - autoencoder: deep -data_sampling: - train: - data_provider: - 
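The two `3D_ssim_IVD_4x` runs above swap the training objective to `loss: ssim`. scikit-image's `structural_similarity` (used for evaluation in the scripts further below) is not a differentiable PyTorch op, so a trainable SSIM loss needs its own implementation. A minimal single-scale sketch with a uniform averaging window; the repository's real version may use a Gaussian window and a multi-scale variant, as is conventional:

```python
import torch
import torch.nn.functional as F

def ssim_loss(x, y, window=7, data_range=1.0):
    """1 - SSIM over NCHW tensors, uniform window (illustrative sketch)."""
    c1 = (0.01 * data_range) ** 2
    c2 = (0.03 * data_range) ** 2
    mu_x = F.avg_pool2d(x, window, stride=1)
    mu_y = F.avg_pool2d(y, window, stride=1)
    # Local variances/covariance: E[x^2] - E[x]^2 over the same window.
    var_x = F.avg_pool2d(x * x, window, stride=1) - mu_x ** 2
    var_y = F.avg_pool2d(y * y, window, stride=1) - mu_y ** 2
    cov_xy = F.avg_pool2d(x * y, window, stride=1) - mu_x * mu_y
    ssim_map = ((2 * mu_x * mu_y + c1) * (2 * cov_xy + c2)) / \
               ((mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2))
    return 1.0 - ssim_map.mean()
```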
SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run_autoencoder/3D_deep_autoencoder_ds_1176_HR.yml b/experiments/run_autoencoder/3D_deep_autoencoder_ds_1176_HR.yml deleted file mode 100644 index 12ad4c9..0000000 --- a/experiments/run_autoencoder/3D_deep_autoencoder_ds_1176_HR.yml +++ /dev/null @@ -1,88 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 24 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: l1 - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance - autoencoder: deep -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run_gan/3D_perceptualnet_ds_autoencoder_16.yml b/experiments/run_gan/3D_perceptualnet_ds_autoencoder_16.yml deleted file mode 100644 index 017bf13..0000000 --- a/experiments/run_gan/3D_perceptualnet_ds_autoencoder_16.yml +++ /dev/null @@ -1,88 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - suffix: '_3d_duplicates' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 6 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and 
"combined" - loss: autoencoder_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run_gan/config_test.yml b/experiments/run_gan/config_test.yml deleted file mode 100644 index cbec6d4..0000000 --- a/experiments/run_gan/config_test.yml +++ /dev/null @@ -1,109 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - - accumulate_grad: - D: False - G: False - - accumulate_grad_in_iter: - D: False - G: False - - train_starts_at_epoch: - D: 0 - G: 0 -gan: - classes: 2 - latent_size: 16 - warmup_batches: 10 - sample_interval: 500 - lambda_adv: 0.005 # Adversarial loss weight - lambda_pixel: 0.01 # Pixel loss weight -training: - # General - experiment: 3D - calc_meanstd: False - reduce_data: False - parse_color: False - crop_small: [24, 24] - crossmodality: False - # Model - architecture: perceptualnet - activation: relu - normalization: in - upscale_input: True # In PerceptualNet = resize-convolution - pretrain: False - existing_model: dios-erc-gpu_2020_08_19_12_02_50_gan_enhance_combined_0.0001_mag4 - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 30 - bs: 2 - # Loss parameters: possible losses are "mse", "bce", "jaccard" and "combined" - loss: combined_layers - log_jaccard: true - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 30 - rotation: [-10, 10] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [1, 5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [5, 15] - contrast: 0.2 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - D: - real: - cate: ItemLoader - batches_per_iter: 1 - data_key: data - target_key: target - fake: - cate: FakeSampler - batches_per_iter: 1 - data_key: data - target_key: target - G: - noise: - cate: GaussianNoiseSampler - batches_per_iter: 1 - data_key: latent - target_key: valid - eval: - data_provider: - G: - noise: - cate: GaussianNoiseSampler - batches_per_iter: 1 - data_key: latent - target_key: valid \ No newline at end of file diff --git a/experiments/run_segmentation/rn50_UNet.yml b/experiments/run_segmentation/rn50_UNet.yml deleted file mode 100644 index 738f6e5..0000000 --- a/experiments/run_segmentation/rn50_UNet.yml +++ /dev/null @@ -1,91 +0,0 
@@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: False - weight: 'gaussian' - step: 2 - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: UNet -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/run_segmentation/rn50_fpn.yml b/experiments/run_segmentation/rn50_fpn.yml deleted file mode 100644 index 11f6152..0000000 --- a/experiments/run_segmentation/rn50_fpn.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - segmentation: True - suffix: '' # Options: _curated _reduced - parse_color: False - crop_small: [32, 32] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 60 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 
0.0009] -inference: - calc_inference: False - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: FPN -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_mse_tv_dental.yml b/experiments/wacv_experiments/2D_mse_tv_dental.yml deleted file mode 100644 index 95e1da4..0000000 --- a/experiments/wacv_experiments/2D_mse_tv_dental.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_dental_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 128 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: mse_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x.yml b/experiments/wacv_experiments/2D_perceptual_tv_1176_8x.yml deleted file mode 100644 index b21443a..0000000 --- a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 1 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - 
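Every config sets `gram: True`, which toggles Gram-matrix (style-type) statistics inside the perceptual loss of the `perceptual_tv` runs. A sketch of that idea against VGG16 features; the layer cut-off, the equal weighting of the two terms, and the use of VGG itself are assumptions here, since the repository's loss may instead be built on its own pretrained network:

```python
import torch
import torch.nn.functional as F
import torchvision

# Truncated VGG16 as a frozen feature extractor (layers up to relu3_3).
vgg = torchvision.models.vgg16(weights='DEFAULT').features[:16].eval()
for p in vgg.parameters():
    p.requires_grad_(False)

def gram(f):
    # f: (N, C, H, W) feature map -> (N, C, C) normalised Gram matrix.
    n, c, h, w = f.shape
    f = f.reshape(n, c, h * w)
    return f @ f.transpose(1, 2) / (c * h * w)

def perceptual_gram_loss(sr, hr):
    # Assumes 3-channel input; grayscale stacks are repeated to
    # three channels elsewhere in these scripts.
    fs, fh = vgg(sr), vgg(hr)
    return F.mse_loss(fs, fh) + F.mse_loss(gram(fs), gram(fh))
```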
normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s2.yml b/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s2.yml deleted file mode 100644 index 57c6bdc..0000000 --- a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s2.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 2 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - 
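The `_tv` suffix on `mse_tv` / `combined_tv` / `autoencoder_tv` denotes an added total-variation regulariser, which penalises high-frequency noise in the prediction. A minimal anisotropic 2D version; the relative weight of the TV term is an assumption, not a value from these configs:

```python
import torch
import torch.nn.functional as F

def tv_loss(img):
    """Anisotropic total variation of an NCHW tensor."""
    dh = (img[..., 1:, :] - img[..., :-1, :]).abs().mean()
    dw = (img[..., :, 1:] - img[..., :, :-1]).abs().mean()
    return dh + dw

def mse_tv(pred, target, tv_weight=1e-4):
    # Data term plus a small TV penalty, mirroring the 'mse_tv' loss name.
    return F.mse_loss(pred, target) + tv_weight * tv_loss(pred)
```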
data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s3.yml b/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s3.yml deleted file mode 100644 index 7919982..0000000 --- a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s3.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 3 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s4.yml b/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s4.yml deleted file mode 100644 index 01409e6..0000000 --- a/experiments/wacv_experiments/2D_perceptual_tv_1176_8x_s4.yml +++ /dev/null @@ -1,93 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_HR_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - antialiasing: 9 - sigma: 4 - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 8 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - 
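The `antialiasing: 9` / `sigma: 1..4` pairs in this `8x` series control the Gaussian low-pass applied before decimation when the low-resolution inputs are generated; the `_s2`/`_s3`/`_s4` files vary only `sigma`. A per-slice sketch with OpenCV. Note the `interpolation=` keyword: the deleted `create_ds_3d_dataset.py` further below passes `cv2.INTER_CUBIC` positionally, where `cv2.resize` expects the `dst` argument instead, so the keyword form is the safe one:

```python
import cv2
import numpy as np

def downsample_slice(hr, mag=8, ksize=9, sigma=2.0):
    """Blur with a ksize x ksize Gaussian, then decimate by `mag`."""
    blurred = cv2.GaussianBlur(hr, ksize=(ksize, ksize),
                               sigmaX=sigma, sigmaY=sigma)
    lr_size = (hr.shape[1] // mag, hr.shape[0] // mag)  # cv2 wants (w, h)
    return cv2.resize(blurred, lr_size, interpolation=cv2.INTER_CUBIC)

hr = (np.random.rand(256, 256) * 255).astype(np.uint8)  # stand-in HR slice
lr = downsample_slice(hr, mag=8, ksize=9, sigma=2.0)
print(lr.shape)  # (32, 32)
```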
bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_perceptual_tv_dental.yml b/experiments/wacv_experiments/2D_perceptual_tv_dental.yml deleted file mode 100644 index 51d6aa9..0000000 --- a/experiments/wacv_experiments/2D_perceptual_tv_dental.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_dental_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 128 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: combined_tv - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_rn50_UNet.yml b/experiments/wacv_experiments/2D_rn50_UNet.yml deleted file mode 100644 index 57a1dbb..0000000 --- 
a/experiments/wacv_experiments/2D_rn50_UNet.yml +++ /dev/null @@ -1,92 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - segmentation: True - threshold: 63 - suffix: '_1176_HR_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 20 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - weight: 'gaussian' - step: 2 - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: UNet -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_rn50_fpn.yml b/experiments/wacv_experiments/2D_rn50_fpn.yml deleted file mode 100644 index 2284100..0000000 --- a/experiments/wacv_experiments/2D_rn50_fpn.yml +++ /dev/null @@ -1,92 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: False - segmentation: True - threshold: 63 - suffix: '_1176_HR_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: True - # Model - architecture: encoderdecoder - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_01_08_11_19_18_3D_autoencoder_ds_16 - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 20 - bs: 64 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: bce_combined - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: 
[0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 2 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet50 - decoder: FPN -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/experiments/wacv_experiments/2D_ssim_dental.yml b/experiments/wacv_experiments/2D_ssim_dental.yml deleted file mode 100644 index 7b208a9..0000000 --- a/experiments/wacv_experiments/2D_ssim_dental.yml +++ /dev/null @@ -1,91 +0,0 @@ -strategy: - # Support for 'train' or 'eval' stages only - stage_names: - - train - - eval - accumulate_grad: - SR: False - accumulate_grad_in_iter: - SR: False - train_starts_at_epoch: - SR: 0 -training: - # General - calc_meanstd: True - suffix: '_1176_dental_2D' # Options: _curated _reduced - parse_color: False - crop_small: [16, 16] - crossmodality: False - rgb: False - # Model - architecture: perceptualnet - upscale_input: True # In PerceptualNet = resize-convolution - add_residual: False - activation: relu - normalization: in - autoencoder_pretrained: 2021_02_28_10_03_55_3D_deep_autoencoder_ds_1176 - autoencoder_layers: False - pretrain: False - existing_model: dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit - magnification: 4 - n_blocks: 15 - # Training - wd: 0.0001 - lr: 0.0001 - n_folds: 4 - epochs: 50 - bs: 128 - # Loss parameters: possible losses are "mse", "bce", "jaccard", "perceptual", "l1" and "combined" - loss: ssim - train_loss: True - log_jaccard: True - imagenet_normalize_loss: False - gram: True # Calculate Gram matrix on Perceptual loss - # LR reduction - use_LR_red: True - patience: 5 - factor: 0.1 - eps: 1e-7 -transforms: - probability: 0.5 - scale: [0.9, 1.1] - translation: 5 - rotation: [-30, 30] - shear: [-0.1, 0.1] - gamma: [0.7, 1.5] - sigma: [0.4, 1.5] - hsv: [0, 50] - gain_sp: 0.1 - gain_gn: 0.5 - brightness: [-50, 50] - contrast: 0.4 - v_range: [0.000001, 0.0009] -inference: - calc_inference: True - step: 3 - weight: 'gaussian' - threshold: 0.8 -model: - decoder_normalization: IN - n_outputs: 1 - spatial_dropout: 0.1 - bayesian_dropout: 0.5 - backbone: resnet34 - decoder: enhance -data_sampling: - train: - data_provider: - SR: - loader_train: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" - eval: - data_provider: - SR: - loader_eval: - cate: ItemLoader - batches_per_iter: 1 - data_key: "data" - target_key: "target" \ No newline at end of file diff --git a/scripts/compile_metrics.py b/scripts/compile_metrics.py deleted file mode 100644 index 8864cf4..0000000 --- a/scripts/compile_metrics.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import numpy as np -from scipy.ndimage import zoom -from tqdm import tqdm -import pandas as pd -import h5py -from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity -import cv2 -from omegaconf import OmegaConf -from pathlib import Path -from scipy.stats import pearsonr -from time import strftime -import argparse -import dill -import yaml - -from collagen.core.utils import auto_detect_device -from bone_enhance.inference.model_components import load_models 
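The `2D_rn50_UNet` / `2D_rn50_fpn` segmentation baselines above train with `loss: bce_combined` and log the Jaccard index. A common way to combine those two objectives, shown as a sketch; the exact weighting used in the repository is an assumption:

```python
import torch
import torch.nn.functional as F

def bce_jaccard_loss(logits, target, eps=1e-7):
    """BCE plus soft-Jaccard (IoU) loss for binary segmentation."""
    bce = F.binary_cross_entropy_with_logits(logits, target)
    probs = torch.sigmoid(logits)
    intersection = (probs * target).sum()
    union = probs.sum() + target.sum() - intersection
    jaccard = (intersection + eps) / (union + eps)
    return bce + (1.0 - jaccard)
```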
-from bone_enhance.utilities import load, calculate_bvtv, threshold - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - #parser.add_argument('--dataset_root', type=Path, default=f'/media/santeri/data/BoneEnhance/Data/evaluation_ankle') - parser.add_argument('--dataset_root', type=Path, default=f'/media/santeri/data/BoneEnhance/Data/predictions_3D_clinical/ankle_experiments_eval') - parser.add_argument('--save_dir', type=Path, default='/media/santeri/data/BoneEnhance/Data') - parser.add_argument('--snapshots', type=Path, default='../../Workdir/snapshots/') - args = parser.parse_args() - - # Load ground truth BVTV - experiments = os.listdir(args.dataset_root) - experiments = [e for e in experiments if not os.path.isdir(os.path.join(args.dataset_root, e))] - - compiled_results = {'Experiment': [], 'MSE': [], 'PSNR': [], 'SSIM': []} - for experiment in experiments: - results = pd.read_excel(str(args.dataset_root / experiment), engine='openpyxl') - - # Remove unnecessary parts of experiment name - exp = experiment.split('3D', 1) - if len(exp) == 1: - exp = '2D' + experiment.split('2D', 1)[-1][:-5] - else: - exp = '3D' + exp[-1][:-5] - - # Append to list - compiled_results['Experiment'].append(exp) - compiled_results['MSE'].append(results['MSE'].iloc[-1]) - compiled_results['PSNR'].append(results['PSNR'].iloc[-1]) - compiled_results['SSIM'].append(results['SSIM'].iloc[-1]) - - # Write to excel - writer = pd.ExcelWriter(str(args.save_dir / ('metrics_compiled_' + strftime(f'_%Y_%m_%d_%H_%M_%S'))) + '.xlsx') - df1 = pd.DataFrame(compiled_results) - df1.to_excel(writer, sheet_name='Metrics') - writer.save() \ No newline at end of file diff --git a/scripts/create_ds_3d_dataset.py b/scripts/create_ds_3d_dataset.py deleted file mode 100644 index d4b7218..0000000 --- a/scripts/create_ds_3d_dataset.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -import h5py -import numpy as np -from pathlib import Path -from glob import glob -import cv2 -from bone_enhance.utilities.main import load, save, print_orthogonal, load_logfile -from skimage.transform import resize - -if __name__ == "__main__": - # Initialize experiment - images_loc = Path('/media/santeri/data/BoneEnhance/Data/target_IVD_isotropic_3D_HR_largecrop') - images_loc = Path(f'../../Data/dental/Hampaat_target') - # Save path - images_save = Path('/media/santeri/data/BoneEnhance/Data/input_IVD_isotropic_3D_HR_largecrop') - images_save = Path(f'../../Data/dental/Hampaat_input') - images_save.mkdir(exist_ok=True) - # Output resolution - - mag = 4 - res = 50 - save_h5 = True - resize_3d = True - # Antialiasing sigma - sigma = 1 - k = 5 - - # List samples - # samples = glob(str(images_loc / '*.h5')) - samples = os.listdir(images_loc) - samples.sort() - - #samples = samples[3:] - - # Resample datasets, create 3D stack - for sample in samples: - sample = str(Path(sample).name) - print(f'Processing sample: {sample}') - - # Load image stacks - if sample.endswith('.h5'): - with h5py.File(str(images_loc / sample), 'r') as f: - data = f['data'][:] - else: - data, files = load(str(images_loc / sample), rgb=True, axis=(1, 2, 0)) - - - # Downscale to input size - if resize_3d: - new_size = (data.shape[0] // mag, data.shape[1] // mag, data.shape[2] // mag) - data = resize(data, new_size, order=0, anti_aliasing=True, preserve_range=True, anti_aliasing_sigma=sigma).astype('uint8') - else: - new_size = (data.shape[2] // mag, data.shape[1] // mag) - for image in range(data.shape[0]): - - data[image, :, 
:] = cv2.resize( - cv2.resize( - cv2.GaussianBlur(data[image, :, :], ksize=(k, k), sigmaX=sigma, sigmaY=sigma), new_size), - (data.shape[2], data.shape[1]), cv2.INTER_CUBIC) - - # Save the cropped volume to hdf5 - fname = str(images_save / f'{sample}') - if save_h5: - with h5py.File(fname, 'w') as f: - f.create_dataset('data', data=data) - else: - save(fname, sample, data, dtype='.png') - #except (ValueError, FileNotFoundError): - # print(f'Error in sample {sample}') - # continue diff --git a/scripts/create_voi.py b/scripts/create_voi.py deleted file mode 100644 index f41043e..0000000 --- a/scripts/create_voi.py +++ /dev/null @@ -1,159 +0,0 @@ -import os -import h5py -import matplotlib.pyplot as plt -from pathlib import Path -from time import time, strftime -import pandas as pd -from tqdm import tqdm - -import numpy as np -from scipy.ndimage import zoom -import argparse -from skimage.transform import resize - -from bone_enhance.utilities import load, save, print_orthogonal, threshold -from bone_enhance.inference.thickness_analysis import _local_thickness - - -if __name__ == '__main__': - # ******************************** 3D case ************************************ - start = time() - - # Ankle experiments - path = '../../Workdir/wacv_experiments_new_2D' - snaps = os.listdir(path) - snaps = [snap for snap in snaps if os.path.isdir(os.path.join(path, snap))] - - base_path = Path('../../Data/Test set (full)') - #snap = '2021_05_28_13_52_02_2D_mse_tv_1176_seed30' - for snap in snaps: - #filter_size = 12 - parser = argparse.ArgumentParser() - parser.add_argument('--masks', type=Path, default=base_path / 'trabecular_VOI') - parser.add_argument('--save', type=Path, default=base_path / 'masks_wacv_new' / snap) - parser.add_argument('--preds', type=Path, default=base_path / 'predictions_wacv_new' / snap) - parser.add_argument('--plot', type=bool, default=True) - parser.add_argument('--scale_voi', type=bool, default=False) - parser.add_argument('--save_h5', type=bool, default=False) - parser.add_argument('--batch_id', type=int, default=None) - parser.add_argument('--resolution', type=tuple, default=(50, 50, 50)) # in µm - parser.add_argument('--mode', type=str, - choices=['med2d_dist3d_lth3d', 'stacked_2d', 'med2d_dist2d_lth3d'], - default='med2d_dist3d_lth3d') - parser.add_argument('--max_th', type=float, default=None) # in µm - parser.add_argument('--completed', type=int, default=0) - - args = parser.parse_args() - - # Sample list - samples = os.listdir(args.masks) - samples.sort() - if 'visualization' in samples: - samples.remove('visualization') - if args.batch_id is not None: - samples = [samples[args.batch_id]] - elif args.completed > 0: - samples = samples[args.completed:] - - # Remove unnecessary samples - #samples.remove('33_L6TM_1_Rec') - #samples.remove('34_R6_TL7_Rec') - #samples.remove('34_R6_TM17_Rec') - - samples_pred = os.listdir(str(args.preds)) - samples_pred.sort() - if 'visualizations' in samples_pred: - samples_pred.remove('visualizations') - - # Save paths - args.save.parent.mkdir(exist_ok=True) - args.save.mkdir(exist_ok=True) - (args.save / 'visualization').mkdir(exist_ok=True) - if args.save_h5: - (args.save / 'h5').mkdir(exist_ok=True) - - t = strftime(f'%Y_%m_%d_%H_%M') - - # Loop for samples - for idx in tqdm(range(len(samples)), desc=f'Processing snapshot {snap}'): - time_sample = time() - sample = samples[idx] - sample_pred = samples_pred[idx] - #print(f'Processing sample {sample}') - - # Load full list of files - if args.scale_voi: - files = os.listdir(str(args.masks 
/ sample)) - files.sort() - newlist = [] - for file in files: - if file.endswith('.png') or file.endswith('.bmp') or file.endswith('.tif') \ - or file.endswith('.dcm') or file.endswith('.ima'): - try: - if file.endswith('.dcm') or file.endswith('.ima'): - newlist.append(file) - dicom = True - continue - - int(file[-7:-4]) - - # Do not load files with different prefix into the stack - if len(newlist) != 0 and file.rsplit('_', 1)[0] != newlist[-1].rsplit('_', 1)[0]: - break - - newlist.append(file) - except ValueError: - continue - files_full = newlist[:] # replace list - - - pred, _ = load(str(args.preds / sample_pred), axis=(1, 2, 0,)) - voi, files = load(str(args.masks / sample), axis=(1, 2, 0,)) - - # Rescale VOI - if args.scale_voi: - # Append the empty images - limits_full = (Path(files_full[0]).stem[-8:], Path(files_full[-1]).stem[-8:]) - limits_voi = (Path(files[0]).stem[-8:], Path(files[-1]).stem[-8:]) - array_shape = (voi.shape[0], voi.shape[1], int(limits_voi[0]) - int(limits_full[0])) - voi = np.append(np.zeros(array_shape, dtype='uint8'), voi, axis=2) - array_shape = (voi.shape[0], voi.shape[1], int(limits_full[1]) - int(limits_voi[1])) - voi = np.append(voi, np.zeros(array_shape, dtype='uint8'), axis=2) - - # Resize VOI - factor = (pred.shape[0] / voi.shape[0], pred.shape[1] / voi.shape[1], pred.shape[2] / voi.shape[2]) - voi = zoom(voi, factor, order=0) - - - # Fix size mismatch - #size = np.min((voi.shape, pred.shape), axis=0) - - if len(np.unique(pred)) != 2: - pred, _ = threshold(pred) - - if args.plot: - print_orthogonal(pred, savepath=str(args.save / 'visualization' / (sample + '_pred.png')), res=50 / 1000) - - # Apply volume-of-interest - pred = np.logical_and(pred, voi).astype(np.uint8) * 255 - - if args.plot: - print_orthogonal(pred, savepath=str(args.save / 'visualization' / (sample + '_voi.png')), res=50 / 1000) - - - # H5PY save - if args.save_h5: - savepath = args.th_maps / 'h5' / (sample + '.h5') - h5 = h5py.File(str(savepath), 'w') - h5.create_dataset('data', data=pred) - h5.close() - else: - # Save results - save(str(args.save / sample), Path(sample).stem, pred, dtype='.bmp', verbose=False) - - #dur_sample = time() - time_sample - #print(f'Sample processed in {(dur_sample % 3600) // 60} minutes, {dur_sample % 60} seconds.') - - dur = time() - start - completed = strftime(f'%Y_%m_%d_%H_%M') - print(f'Analysis completed in {(dur % 3600) // 60} minutes, {dur % 60} seconds at time {completed}.') \ No newline at end of file diff --git a/scripts/despeckle.py b/scripts/despeckle.py deleted file mode 100644 index 5bd0532..0000000 --- a/scripts/despeckle.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -from pathlib import Path -from bone_enhance.training.session import init_experiment -from bone_enhance.utilities.main import load, save, print_orthogonal -from bone_enhance.inference import largest_object -from scipy.ndimage import zoom - -if __name__ == "__main__": - # Initialize experiment - args, config, _, device = init_experiment() - images_loc = Path('/media/dios/kaappi/Santeri/Vessels') - - images_save = Path('/media/santeri/data/RabbitSegmentation/Vessels/Processed') - - images_save.mkdir(exist_ok=True) - - subdir = '' - resample = True - - # Resample large number of slices - samples = os.listdir(images_loc) - samples.sort() - samples = ['Vessel'] - #samples = samples[25:] - for sample in samples: - print(f'Processing sample: {sample}') - - - data, _ = load(str(images_loc), axis=(1, 2, 0)) - print_orthogonal(data, res=3, scale_factor=1000) - data = largest_object(data 
> 0, area_limit=100).astype('bool') - print_orthogonal(data, res=3, scale_factor=1000) - save(str(images_save), sample, data * 255, dtype='.bmp') - diff --git a/scripts/evaluate_3d.py b/scripts/evaluate_3d.py deleted file mode 100644 index 78e2995..0000000 --- a/scripts/evaluate_3d.py +++ /dev/null @@ -1,191 +0,0 @@ -import os -import numpy as np -from scipy.ndimage import zoom -from tqdm import tqdm -import pandas as pd -import h5py -from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity -import cv2 -from omegaconf import OmegaConf -from pathlib import Path -from scipy.stats import pearsonr -from time import time -import argparse -import dill -import yaml - -from collagen.core.utils import auto_detect_device -from bone_enhance.inference.model_components import load_models -from bone_enhance.utilities import load, calculate_bvtv, threshold - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -def evaluation_runner(args, save_dir, masks=True, suffix='_3d'): - - # Evaluation arguments - args.image_path = args.dataset_root / 'input' - args.target_path = args.dataset_root / f'target{suffix}' - args.masks = Path('/media/dios/kaappi/Sakke/Saskatoon/Verity/Registration') - args.save_dir.mkdir(exist_ok=True) - - # Snapshots to be evaluated - if type(save_dir) != list: - save_dir = [save_dir] - - # Iterate through snapshots - for snap in save_dir: - - # Initialize results - results = {'Sample': [], 'MSE': [], 'PSNR': [], 'SSIM': [], 'BVTV': []} - - # Sample list - all_samples = os.listdir(snap) - samples = [] - for i in range(len(all_samples)): - if os.path.isdir(str(snap / all_samples[i])): - samples.append(all_samples[i]) - samples.sort() - if 'visualizations' in samples: - samples.remove('visualizations') - # List the µCT target - samples_target = os.listdir(args.target_path) - samples_target.sort() - # List VOI - samples_voi = os.listdir(args.image_path) - samples_voi.sort() - - # Loop for samples - for idx, sample in tqdm(enumerate(samples), total=len(samples), desc=f'Running evaluation for snap {snap.stem}'): - #try: - # Load image stacks - with h5py.File(str(args.target_path / samples_target[idx]), 'r') as f: - target = f['data'][:] - - pred, files_pred = load(str(args.pred_path / snap.name / sample / 'conventional_segmentation_gray'), axis=(1, 2, 0), rgb=False, - n_jobs=args.num_threads) - - # Crop in case of inconsistency - crop = np.min((pred.shape, target.shape), axis=0) - target = target[:crop[0], :crop[1], :crop[2]] - pred = pred[:crop[0], :crop[1], :crop[2]].squeeze() - - # Evaluate metrics - mse = mean_squared_error(target / 255., pred / 255.) - psnr = peak_signal_noise_ratio(target / 255., pred / 255.) - ssim = structural_similarity(target / 255., pred / 255.) 
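`evaluate_3d.py` scores each predicted volume against the µCT target with scikit-image metrics on [0, 1] floats, as in the three lines above. One portability note: recent scikit-image releases no longer infer `data_range` for floating-point inputs, so passing it explicitly is safer. A self-contained sketch:

```python
import numpy as np
from skimage.metrics import (mean_squared_error,
                             peak_signal_noise_ratio,
                             structural_similarity)

rng = np.random.default_rng(0)
target = rng.random((64, 64, 64))  # stand-in µCT volume in [0, 1]
pred = np.clip(target + 0.05 * rng.standard_normal(target.shape), 0, 1)

mse = mean_squared_error(target, pred)
psnr = peak_signal_noise_ratio(target, pred, data_range=1.0)
# structural_similarity accepts n-dimensional inputs, so the whole
# volume is scored at once; data_range is explicit for float data.
ssim = structural_similarity(target, pred, data_range=1.0)
print(f'MSE={mse:.4f}, PSNR={psnr:.2f} dB, SSIM={ssim:.3f}')
```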
- - # Binarize and calculate BVTV - - # Otsu thresholding - if len(np.unique(pred)) != 2: - pred, _ = threshold(pred, method=args.threshold) - - if masks: - # Apply VOI - voi, _ = load(str(args.masks / samples_voi[idx] / 'ROI'), axis=(1, 2, 0)) - voi = zoom(voi.squeeze(), (4, 4, 4), order=0) - # Fix size mismatch - size = np.min((voi.shape, pred.shape), axis=0) - pred = np.logical_and(pred[:size[0], :size[1], :size[2]], - voi[:size[0], :size[1], :size[2]]) - - # Calculate BVTV - bvtv = calculate_bvtv(pred, voi) - else: - # Cannot calculate bvtv without reference VOI - bvtv = 0 - - #print(f'Sample {sample}: MSE = {mse}, PSNR = {psnr}, SSIM = {ssim}, BVTV: {bvtv}') - - # Update results - results['Sample'].append(sample) - results['MSE'].append(mse) - results['PSNR'].append(psnr) - results['SSIM'].append(ssim) - results['BVTV'].append(bvtv) - - #except (AttributeError, ValueError): - # print(f'Sample {sample} failing. Skipping to next one.') - # continue - - # Add average value to - results['Sample'].append('Average values') - results['MSE'].append(np.average(results['MSE'])) - results['PSNR'].append(np.average(results['PSNR'])) - results['SSIM'].append(np.average(results['SSIM'])) - results['BVTV'].append(np.average(results['BVTV'])) - - # Load ground truth BVTV - bvtv_test = pd.read_csv(str(args.bvtv_path), header=None) - pearson = pearsonr(results['BVTV'][:-1], bvtv_test.iloc[:, 1]) - results['Pearson'] = np.zeros(len(results['MSE'])) - results['Pearson'][:2] = pearson - - # Display on console - res = (results['MSE'][-1], results['PSNR'][-1], results['SSIM'][-1]) - print(f'Average results:\n' - f'MSE = {res[0]},\n' - f'PSNR = {res[1]},\n' - f'SSIM = {res[2]},\n') - print(f'Pearson correlation to µCT: {pearson[0]}, p = {pearson[1]}') - - - # Write to excel - writer = pd.ExcelWriter(str(args.save_dir / ('metrics_' + str(snap.name))) + '.xlsx') - df1 = pd.DataFrame(results) - df1.to_excel(writer, sheet_name='Metrics') - writer.save() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--dataset_root', type=Path, default=f'../../Data/Test set (KP02)') - parser.add_argument('--pred_path', type=Path, default=#f'../../Data/Test set (KP02)/predictions_cbct') - '/media/dios/kaappi/Santeri/BoneEnhance/upscaled_images') - parser.add_argument('--save_dir', type=Path, default='../../Data/Test set (KP02)/evaluation_cbct') - parser.add_argument('--bvtv_path', type=Path, default='../../Data/BVTV_test.csv') - parser.add_argument('--bs', type=int, default=4) - parser.add_argument('--magnification', type=int, default=4) - parser.add_argument('--num_threads', type=int, default=12) - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--gpus', type=int, default=2) - parser.add_argument('--threshold', type=str, choices=['otsu', 'mean'], default='otsu') - parser.add_argument('--weight', type=str, choices=['gaussian', 'mean'], default='gaussian') - # µCT snapshot - parser.add_argument('--snapshots', type=Path, default='../../Workdir/snapshots/') - args = parser.parse_args() - - # Snapshots to be evaluated - # µCT models - - snaps = ['2021_03_03_11_52_07_1_3D_mse_tv_1176_HR', # High resolution 1176 model (mse+tv) - '2021_02_25_07_51_17_1_3D_perceptualnet_cm_perceptual_pretrained', - '2021_02_26_05_52_47_3D_perceptualnet_ds_mse_tv', # Perceptualnet downscaled - '2021_02_24_12_30_02_3D_perceptualnet_cm_mse_tv', # Perceptualnet CBCT - '2021_03_25_13_51_14_rn50_UNet_bcejci', - '2021_03_25_13_51_14_rn50_fpn_bcejci', - 
'2021_03_31_22_06_00_2D_perceptualnet_cm', - '2021_01_08_09_49_45_2D_perceptualnet_ds_16' - ] - snaps = ['Verity_TCI_test'] - suffixes = ['_3d'] * len(snaps) - snaps = [args.snapshots / snap for snap in snaps] - - # Iterate through snapshots - args.save_dir.mkdir(exist_ok=True) - for idx, snap in enumerate(snaps): - start = time() - - # Create directories - save_dir = args.save_dir / str(snap.stem) - save_dir.mkdir(exist_ok=True) - - device = auto_detect_device() - - masks = True - evaluation_runner(args, args.pred_path / snap.stem, masks=masks, suffix=suffixes[idx]) - - dur = time() - start - print(f'Metrics evaluated in {(dur % 3600) // 60} minutes, {dur % 60} seconds.') diff --git a/scripts/evaluate_ankle_3d.py b/scripts/evaluate_ankle_3d.py deleted file mode 100644 index e4e5c50..0000000 --- a/scripts/evaluate_ankle_3d.py +++ /dev/null @@ -1,119 +0,0 @@ -import os -import numpy as np -from scipy.ndimage import zoom -from tqdm import tqdm -import pandas as pd -import h5py -from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity -import cv2 -from omegaconf import OmegaConf -from pathlib import Path -from scipy.stats import pearsonr -from time import time -import argparse -import dill -import yaml - -from collagen.core.utils import auto_detect_device -from bone_enhance.inference.model_components import load_models -from bone_enhance.utilities import load, calculate_bvtv, threshold, print_orthogonal - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -def evaluation_runner(args): - - # Evaluation arguments - args.save_dir.mkdir(exist_ok=True) - - save_dir = os.listdir(args.pred_path) - save_dir.sort() - if 'visualization' in save_dir: - save_dir.remove('visualization') - - # Snapshots to be evaluated - if type(save_dir) != list: - save_dir = [save_dir] - - # Iterate through snapshots - for experiment in save_dir: - experiment = args.pred_path / experiment - # Initialize results - results = {'Sample': [], 'MSE': [], 'PSNR': [], 'SSIM': []} - - # Sample list - all_samples = os.listdir(str(experiment)) - samples = [] - for i in range(len(all_samples)): - if os.path.isdir(str(experiment / all_samples[i])): - samples.append(all_samples[i]) - samples.sort() - if 'visualizations' in samples: - samples.remove('visualizations') - - # Loop for samples - for idx, sample in tqdm(enumerate(samples), total=len(samples), desc=f'Running evaluation for snap {experiment.stem}'): - #try: - # Load image stacks - pred, files_pred = load(str(experiment / sample), axis=(1, 2, 0), rgb=False, n_jobs=args.num_threads) - target, files_target = load(str(args.ref_path), axis=(1, 2, 0), rgb=False, n_jobs=args.num_threads) - - print_orthogonal(pred, invert=True, res=0.1, title='Predicted', cbar=True, scale_factor=10) - print_orthogonal(target, invert=True, res=0.1, title='Target', cbar=True, scale_factor=10) - - # Crop in case of inconsistency - if pred.shape != target.shape: - print('Inconsistent shapes! Cropping...') - crop = np.min((pred.shape, target.shape), axis=0) - target = target[:crop[0], :crop[1], :crop[2]] - pred = pred[:crop[0], :crop[1], :crop[2]].squeeze() - - # Evaluate metrics - mse = mean_squared_error(target / 255., pred / 255.) - psnr = peak_signal_noise_ratio(target / 255., pred / 255.) - ssim = structural_similarity(target / 255., pred / 255.) 
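In `evaluate_3d.py` above, the binarised prediction is additionally reduced to bone volume fraction (BV/TV) inside the registered VOI and correlated against reference values with `pearsonr`. A sketch of those two steps, where the behaviour of `calculate_bvtv` is assumed from its usage rather than taken from the repository. Note also that both evaluation scripts finish with `writer.save()`, which pandas 2.0 removed; the context-manager form at the end is the current equivalent:

```python
import numpy as np
import pandas as pd
from scipy.stats import pearsonr

def calculate_bvtv(mask, voi):
    """Bone volume / total volume within the VOI (assumed behaviour)."""
    return mask.astype(bool).sum() / voi.astype(bool).sum()

rng = np.random.default_rng(0)
voi = np.ones((32, 32, 32), dtype=bool)
bvtv_pred = [calculate_bvtv(rng.random((32, 32, 32)) > t, voi)
             for t in (0.4, 0.5, 0.6, 0.7)]
bvtv_ref = [0.55, 0.47, 0.38, 0.31]  # placeholder reference values

r, p = pearsonr(bvtv_pred, bvtv_ref)
print(f'Pearson r = {r:.3f}, p = {p:.3f}')

# pandas >= 2.0: use the context manager instead of writer.save().
with pd.ExcelWriter('metrics.xlsx') as writer:
    pd.DataFrame({'BVTV': bvtv_pred}).to_excel(writer, sheet_name='Metrics')
```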
- - # Update results - results['Sample'].append(sample) - results['MSE'].append(mse) - results['PSNR'].append(psnr) - results['SSIM'].append(ssim) - - # Add average value to - results['Sample'].append('Average values') - results['MSE'].append(np.average(results['MSE'])) - results['PSNR'].append(np.average(results['PSNR'])) - results['SSIM'].append(np.average(results['SSIM'])) - - # Display on console - res = (results['MSE'][-1], results['PSNR'][-1], results['SSIM'][-1]) - print(f'Average results:\n' - f'MSE = {res[0]},\n' - f'PSNR = {res[1]},\n' - f'SSIM = {res[2]},\n') - - # Write to excel - writer = pd.ExcelWriter(str(args.save_dir / ('metrics_' + str(experiment.name))) + '.xlsx') - df1 = pd.DataFrame(results) - df1.to_excel(writer, sheet_name='Metrics') - writer.save() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--pred_path', type=Path, default=f'../../Data/predictions_3D_clinical/ankle_experiments2') - parser.add_argument('--save_dir', type=Path, default='../../Data/predictions_3D_clinical/ankle_experiments_eval2') - parser.add_argument('--ref_path', type=Path, default=f'../../Data/Test set (KP02)/ANKLE_SCALED_SMALLVOI_filtered') - parser.add_argument('--num_threads', type=int, default=12) - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--gpus', type=int, default=2) - # µCT snapshot - args = parser.parse_args() - - start = time() - - evaluation_runner(args) - - dur = time() - start - print(f'Metrics evaluated in {(dur % 3600) // 60} minutes, {dur % 60} seconds.') diff --git a/scripts/inference_tiles_3d_large_old.py b/scripts/inference_tiles_3d_large_old.py deleted file mode 100644 index 79cef02..0000000 --- a/scripts/inference_tiles_3d_large_old.py +++ /dev/null @@ -1,189 +0,0 @@ -import cv2 -import numpy as np -import os -from pathlib import Path -import argparse -import matplotlib.pyplot as plt -import dill -import torch -import torch.nn as nn -import yaml -from time import time -from tqdm import tqdm -from glob import glob -from scipy.ndimage import zoom -from omegaconf import OmegaConf -from skimage.transform import resize -import h5py - -from bone_enhance.utilities import load, save, print_orthogonal, render_volume, threshold -from bone_enhance.inference import InferenceModel, inference, largest_object, load_models, inference_3d -from bone_enhance.models import ConvNet, EnhanceNet - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -if __name__ == "__main__": - #snap = 'dios-erc-gpu_2020_11_04_14_10_25_3D_perceptualnet_scratch' # Perceptual scratch - snap = '2020_12_07_09_36_17_3D_perceptualnet_ds_20' - snap = '2020_12_10_09_16_07_3D_perceptualnet_ds_20' # Brightness and contrast augmentations applied - snap = '2020_12_11_07_10_16_3D_perceptualnet_ds_16' # Intensity augmentations applied - snap = '2020_12_14_07_26_07_3D_perceptualnet_ds_16' # Intensity and spatial augs - snap = '2020_12_21_12_58_39_3D_perceptualnet_ds_16' # 2D perceptual loss, 3D model - snap = '2021_01_05_09_21_06_3D_perceptualnet_ds_16' # Autoencoder perceptual loss, 2 folds - snap = '2021_01_11_05_41_47_3D_perceptualnet_ds_autoencoder_16' # Autoencoder, 4 folds, 2 layers - snap = '2021_02_21_11_12_11_3D_perceptualnet_ds_mse_tv' # No perceptual loss - #snap = '2021_03_02_14_55_25_1_3D_perceptualnet_ds_autoencoder_fullpass' # Trained with 1176 data, 200µm resolution - #snap = '2021_03_03_07_00_39_1_3D_perceptualnet_ds_autoencoder_fullpass' - snap = '2021_03_03_11_52_07_1_3D_mse_tv_1176_HR' # High resolution 1176 model (mse+tv) - 
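The inference scripts below reconstruct full volumes from overlapping tiles: per the argparse help, `--step` sets the tile stride (1 = no overlap, 2 = 50 % overlap, and so on) and `--weight gaussian` selects the blending window. The standard pattern is to accumulate weighted tile predictions and normalise by the summed weights; a 2D sketch, assuming the repository's `inference_3d` follows this scheme:

```python
import numpy as np

def gaussian_window(size, sigma_frac=0.125):
    """Separable Gaussian importance map that downweights tile borders."""
    ax = np.arange(size) - (size - 1) / 2.0
    g = np.exp(-(ax ** 2) / (2 * (sigma_frac * size) ** 2))
    return np.outer(g, g)

def merge_tiles(shape, tiles, coords, tile_size):
    out = np.zeros(shape, dtype=np.float64)
    norm = np.zeros(shape, dtype=np.float64)
    w = gaussian_window(tile_size)
    for tile, (y, x) in zip(tiles, coords):
        out[y:y + tile_size, x:x + tile_size] += tile * w
        norm[y:y + tile_size, x:x + tile_size] += w
    return out / np.maximum(norm, 1e-8)

# step = 3 means tiles advance by roughly size // 3 (about 67 % overlap).
size = 16
coords = [(y, x) for y in range(0, 46 - size + 1, size // 3)
                 for x in range(0, 46 - size + 1, size // 3)]
tiles = [np.ones((size, size))] * len(coords)
merged = merge_tiles((46, 46), tiles, coords, size)
print(merged.min(), merged.max())  # 1.0 everywhere: weights cancel out
```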
snap = '2021_03_04_10_11_34_1_3D_mse_tv_1176' # Low resolution 1176 model (mse+tv) - - snap_path = '../../Workdir/ankle_experiments' - snaps = os.listdir(snap_path) - snaps.sort() - snaps = [snap for snap in snaps if os.path.isdir(os.path.join(snap_path, snap))] - snaps = snaps[7:] - - for snap_id in range(len(snaps)): - - snap = snaps[snap_id] - print(f'Calculating inference for snapshot: {snap} {snap_id + 1}/{len(snaps)}') - - start = time() - - parser = argparse.ArgumentParser() - parser.add_argument('--dataset_root', type=Path, default='/media/dios/kaappi/Santeri/BoneEnhance/Clinical data') - parser.add_argument('--save_dir', type=Path, default=f'../../Data/predictions_3D_clinical/ankle_experiments/{snap}') - parser.add_argument('--visualizations', type=Path, - default=f'../../Data/predictions_3D_clinical/ankle_experiments/visualization') - parser.add_argument('--bs', type=int, default=64) - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--weight', type=str, choices=['gaussian', 'mean'], default='gaussian') - parser.add_argument('--completed', type=int, default=0) - parser.add_argument('--step', type=int, default=3, help='Factor for tile step size. 1=no overlap, 2=50% overlap...') - parser.add_argument('--avg_planes', type=bool, default=False) - parser.add_argument('--cuda', type=bool, default=False, help='Whether to merge the inference tiles on GPU or CPU') - parser.add_argument('--mask', type=bool, default=False, help='Whether to remove background with postprocessing') - parser.add_argument('--scale', type=bool, default=False, help='Whether to scale prediction to full dynamic range') - parser.add_argument('--calculate_mean_std', type=bool, default=True, help='Whether to calculate individual mean and std') - parser.add_argument('--snapshot', type=Path, default=os.path.join(snap_path, snap)) - parser.add_argument('--dtype', type=str, choices=['.bmp', '.png', '.tif'], default='.bmp') - args = parser.parse_args() - - # Load snapshot configuration - with open(args.snapshot / 'config.yml', 'r') as f: - config = yaml.load(f, Loader=yaml.Loader) - config = OmegaConf.create(config) - - with open(args.snapshot / 'args.dill', 'rb') as f: - args_experiment = dill.load(f) - - with open(args.snapshot / 'split_config.dill', 'rb') as f: - split_config = dill.load(f) - args.save_dir.mkdir(exist_ok=True) - args.visualizations.mkdir(exist_ok=True) - - # Load models - device = 'cuda' # Use the second GPU for inference - - crop = config.training.crop_small - config.training.bs = args.bs - mag = config.training.magnification - if config.training.crossmodality: - cm = 'cm' - else: - cm = 'ds' - mean_std_path = args.snapshot.parent / f"mean_std_{crop}_{cm}.pth" - tmp = torch.load(mean_std_path) - mean, std = tmp['mean'], tmp['std'] - - # List the models - model_list = load_models(str(args.snapshot), config, n_gpus=args_experiment.gpus)#, fold=0) - - model = InferenceModel(model_list).to(device) - model.eval() - print(f'Found {len(model_list)} models.') - - # Load samples - # samples = [os.path.basename(x) for x in glob(str(args.dataset_root / '*XZ'))] # Load with specific name - samples = os.listdir(args.dataset_root) - samples.sort() - samples = [samples[id] for id in [2]] # Get intended samples from list - - # Skip the completed samples - if args.completed > 0: - samples = samples[args.completed:] - - # Main loop - for idx, sample in enumerate(samples): - print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}, snap {snap}') - - # Load image stacks - if 
sample.endswith('.h5'): - with h5py.File(str(args.dataset_root / sample), 'r') as f: - data_xy = f['data'][:] - else: - data_xy, files = load(str(args.dataset_root / sample), rgb=True, axis=(1, 2, 0)) - - # 3-channel - if len(data_xy.shape) != 4 and config.training.rgb: - data_xy = np.expand_dims(data_xy, 3) - data_xy = np.repeat(data_xy, 3, axis=3) - - x, y, z, ch = data_xy.shape - - print_orthogonal(data_xy[:, :, :, 0], invert=True, res=0.2, title='Input', cbar=True, - savepath=str(args.visualizations / (sample[:-3] + f'_{snap}_input.png')), scale_factor=10) - - # Calculate mean and std from the sample - if args.calculate_mean_std: - mean = torch.Tensor([np.mean(data_xy) / 255]) - std = torch.Tensor([np.std(data_xy) / 255]) - - # Loop for image slices - # 1st orientation - with torch.no_grad(): # Do not update gradients - prediction = inference_3d(model, args, config, data_xy, step=args.step, cuda=args.cuda, mean=mean, std=std, - weight=args.weight) - #prediction, _ = load(str(args.save_dir / sample[:-3]), axis=(1, 2, 0)) - #print_orthogonal(prediction, invert=True, res=50 / 1000, title='Output', cbar=True, - # savepath=str(args.save_dir / 'visualizations' / (sample[:-3] + '_prediction.png')), - # scale_factor=10) - - # Scale the dynamic range - pred_max = np.max(prediction) - if args.scale: - prediction -= np.min(prediction) - prediction /= pred_max - elif pred_max > 1: - print(f'Maximum value {pred_max} will be scaled to one') - prediction /= pred_max - - # Convert to uint8 - prediction = (prediction * 255).astype('uint8') - - # Background removal - if args.mask: - data_xy = zoom(data_xy[:, :, :, 0], (4, 4, 4), order=3) - #mask = np.invert(mask > 120) - #mask, _ = threshold(mask, method='otsu', block=51) - mask = largest_object(np.invert(data_xy > 120), area_limit=10000).astype('bool') - #print_orthogonal(mask) - # Set BG to 0 - prediction[mask] = 0 - # Set BG = TCI - #prediction += data_xy * mask - - # Save predicted full mask - save(str(args.save_dir / sample[:-3]), sample, prediction, dtype=args.dtype) - #render_volume(prediction, - # savepath=str(args.save_dir / 'visualizations' / (sample + '_render' + args.dtype)), - # white=True, use_outline=False) - - print_orthogonal(prediction, invert=True, res=50/1000, title='Output', cbar=True, - savepath=str(args.visualizations / (sample[:-3] + f'_{snap}_prediction.png')), - scale_factor=10) - - dur = time() - start - print(f'Inference completed in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.') \ No newline at end of file diff --git a/scripts/inference_tiles_cluster.py b/scripts/inference_tiles_cluster.py deleted file mode 100644 index f5dd361..0000000 --- a/scripts/inference_tiles_cluster.py +++ /dev/null @@ -1,244 +0,0 @@ -import cv2 -import numpy as np -import os -from pathlib import Path -import argparse -import matplotlib.pyplot as plt -import dill -import torch -import torch.nn as nn -import yaml -from time import time -from tqdm import tqdm -from glob import glob -from scipy.ndimage import zoom -from omegaconf import OmegaConf -from skimage.transform import resize -import h5py - -from bone_enhance.utilities import load, save, print_orthogonal, render_volume, threshold -from bone_enhance.inference import InferenceModel, inference, largest_object, load_models, inference_3d -from bone_enhance.models import ConvNet, EnhanceNet - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -if __name__ == "__main__": - #snap = 'dios-erc-gpu_2020_11_04_14_10_25_3D_perceptualnet_scratch' # Perceptual scratch - snap = 
'2020_12_07_09_36_17_3D_perceptualnet_ds_20' - snap = '2020_12_10_09_16_07_3D_perceptualnet_ds_20' # Brightness and contrast augmentations applied - snap = '2020_12_11_07_10_16_3D_perceptualnet_ds_16' # Intensity augmentations applied - snap = '2020_12_14_07_26_07_3D_perceptualnet_ds_16' # Intensity and spatial augs - snap = '2020_12_21_12_58_39_3D_perceptualnet_ds_16' # 2D perceptual loss, 3D model - snap = '2021_01_05_09_21_06_3D_perceptualnet_ds_16' # Autoencoder perceptual loss, 2 folds - snap = '2021_01_11_05_41_47_3D_perceptualnet_ds_autoencoder_16' # Autoencoder, 4 folds, 2 layers - snap = '2021_02_21_11_12_11_3D_perceptualnet_ds_mse_tv' # No perceptual loss - #snap = '2021_03_02_14_55_25_1_3D_perceptualnet_ds_autoencoder_fullpass' # Trained with 1176 data, 200µm resolution - #snap = '2021_03_03_07_00_39_1_3D_perceptualnet_ds_autoencoder_fullpass' - snap = '2021_03_03_11_52_07_1_3D_mse_tv_1176_HR' # High resolution 1176 model (mse+tv) - snap = '2021_03_04_10_11_34_1_3D_mse_tv_1176' # Low resolution 1176 model (mse+tv) - - path = '../../Workdir/ankle_experiments' - snaps = os.listdir(path) - snaps.sort() - snaps = [snap for snap in snaps if os.path.isdir(os.path.join(path, snap))] - - start = time() - - parser = argparse.ArgumentParser() - parser.add_argument('--dataset_root', type=Path, default='../../Data/Clinical_data') - parser.add_argument('--save_dir', type=Path, default=f'../../Data/predictions_clinical/{snap}') - parser.add_argument('--visualizations', type=Path, - default=f'../../Data/predictions_clinical/visualization') - parser.add_argument('--bs', type=int, default=64) - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--weight', type=str, choices=['gaussian', 'mean'], default='gaussian') - parser.add_argument('--exp_id', type=int, default=0) - parser.add_argument('--step', type=int, default=1, help='Factor for tile step size. 
1=no overlap, 2=50% overlap...') - parser.add_argument('--avg_planes', type=bool, default=False) - parser.add_argument('--verbose', type=bool, default=True) - parser.add_argument('--cuda', type=bool, default=False, help='Whether to merge the inference tiles on GPU or CPU') - parser.add_argument('--scale', type=bool, default=True, help='Whether to scale prediction to full dynamic range') - parser.add_argument('--calculate_mean_std', type=bool, default=True, help='Whether to calculate individual mean and std') - parser.add_argument('--dtype', type=str, choices=['.bmp', '.png', '.tif'], default='.bmp') - args = parser.parse_args() - - args.snapshot = Path(os.path.join(path, snaps[args.exp_id - 1])) - - # Load snapshot configuration - with open(args.snapshot / 'config.yml', 'r') as f: - config = yaml.load(f, Loader=yaml.Loader) - config = OmegaConf.create(config) - - with open(args.snapshot / 'args.dill', 'rb') as f: - args_experiment = dill.load(f) - - with open(args.snapshot / 'split_config.dill', 'rb') as f: - split_config = dill.load(f) - args.save_dir.mkdir(exist_ok=True) - args.visualizations.mkdir(exist_ok=True) - - is_3d = len(config.training.crop_small) == 3 - - # Load models - device = 'cuda' # Use the second GPU for inference - - crop = config.training.crop_small - config.training.bs = args.bs - mag = config.training.magnification - if config.training.crossmodality: - cm = 'cm' - else: - cm = 'ds' - mean_std_path = args.snapshot.parent / f"mean_std_{crop}_{cm}.pth" - tmp = torch.load(mean_std_path) - mean, std = tmp['mean'], tmp['std'] - - # List the models - model_list = load_models(str(args.snapshot), config, n_gpus=args_experiment.gpus)#, fold=0) - - model = InferenceModel(model_list).to(device) - model.eval() - print(f'Found {len(model_list)} models.') - - # Load samples - # samples = [os.path.basename(x) for x in glob(str(args.dataset_root / '*XZ'))] # Load with specific name - samples = os.listdir(args.dataset_root) - samples.sort() - samples = [samples[id] for id in [2]] # Get intended samples from list - - # Skip the completed samples - - # Main loop - if is_3d: - for idx, sample in enumerate(samples): - print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}') - - # Load image stacks - if sample.endswith('.h5'): - with h5py.File(str(args.dataset_root / sample), 'r') as f: - data_xy = f['data'][:] - else: - data_xy, files = load(str(args.dataset_root / sample), rgb=True, axis=(1, 2, 0)) - - # 3-channel - if len(data_xy.shape) != 4 and config.training.rgb: - data_xy = np.expand_dims(data_xy, 3) - data_xy = np.repeat(data_xy, 3, axis=3) - - x, y, z, ch = data_xy.shape - - # Input image - print_orthogonal(data_xy[:, :, :, 0], invert=True, res=0.2, title='Input', cbar=True, - savepath=str(args.visualizations / (sample[:-3] + f'_{snap}_input.png')), scale_factor=10) - - # Calculate mean and std from the sample - if args.calculate_mean_std: - mean = torch.Tensor([np.mean(data_xy) / 255]) - std = torch.Tensor([np.std(data_xy) / 255]) - - # Loop for image slices - with torch.no_grad(): # Do not update gradients - prediction = inference_3d(model, args, config, data_xy, step=args.step, cuda=args.cuda, mean=mean, std=std, - weight=args.weight) - - # Scale the dynamic range - if args.scale: - prediction -= np.min(prediction) - prediction /= np.max(prediction) - - # Convert to uint8 - prediction = (prediction * 255).astype('uint8') - - # Save predicted full mask - save(str(args.save_dir / (sample[:-3] + f'_{snap}')), sample, prediction, dtype=args.dtype) - - 
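The inference scripts in this diff delegate tile stitching to bone_enhance.inference.inference_3d (or inference) with weight='gaussian', whose internals are not shown here. The following is only a plausible minimal sketch of gaussian-weighted tile blending under stated assumptions; the function names, the sigma_scale default and the corner bookkeeping are illustrative, not the repository's actual implementation:

import numpy as np

def gaussian_window(shape, sigma_scale=0.125):
    # Separable Gaussian weight map peaking at the tile centre, so that
    # overlapping tiles fade into each other instead of leaving seams.
    grids = np.meshgrid(*[np.arange(s) for s in shape], indexing='ij')
    w = np.ones(shape, dtype=np.float64)
    for g, s in zip(grids, shape):
        w *= np.exp(-((g - (s - 1) / 2) ** 2) / (2 * (sigma_scale * s) ** 2))
    return w / w.max()

def merge_tiles(tiles, corners, out_shape, tile_shape):
    # Accumulate weighted tiles and normalise by the total weight per voxel.
    out = np.zeros(out_shape)
    norm = np.zeros(out_shape)
    w = gaussian_window(tile_shape)
    for tile, corner in zip(tiles, corners):
        sl = tuple(slice(c, c + t) for c, t in zip(corner, tile_shape))
        out[sl] += tile * w
        norm[sl] += w
    return out / np.maximum(norm, 1e-8)

With --step 2 as documented above (1 = no overlap, 2 = 50% overlap), every voxel receives contributions from several windows, and the division by the accumulated weight keeps intensities unbiased.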
print_orthogonal(prediction, invert=True, res=50/1000, title='Output', cbar=True, - savepath=str(args.visualizations / (sample[:-3] + f'_{snap}_prediction.png')), - scale_factor=10) - - dur = time() - start - print(f'Inference completed in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.') - else: - # Main loop - for idx, sample in enumerate(samples): - print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}') - - # Load image stacks - if sample.endswith('.h5'): - with h5py.File(str(args.dataset_root / sample), 'r') as f: - data_xy = f['data'][:] - else: - data_xy, files = load(str(args.dataset_root / sample), rgb=False, axis=(1, 2, 0)) - - if len(data_xy.shape) != 4: - data_xy = np.expand_dims(data_xy, -1) - x, y, z, ch = data_xy.shape - - print_orthogonal(data_xy[:, :, :, 0], invert=True, res=0.2, title='Input', cbar=True, - savepath=str(args.visualizations / (sample + f'_{snap}_input.png')), - scale_factor=1000) - - # Calculate mean and std from the sample - if args.calculate_mean_std: - mean = torch.Tensor([np.mean(data_xy) / 255]) - std = torch.Tensor([np.std(data_xy) / 255]) - - data_xz = np.transpose(data_xy, (0, 2, 1, 3)) # X-Z-Y-Ch - data_yz = np.transpose(data_xy, (1, 2, 0, 3)) # Y-Z-X-Ch - - # Interpolate 3rd dimension - data_xy = zoom(data_xy, zoom=(1, 1, config.training.magnification, 1)) - if args.avg_planes: - data_xz = zoom(data_xz, zoom=(1, 1, config.training.magnification, 1)) - data_yz = zoom(data_yz, zoom=(1, 1, config.training.magnification, 1)) - - # Output shape - out_xy = np.zeros((x * mag, y * mag, z * mag)) - if args.avg_planes: - out_xz = np.zeros((x * mag, z * mag, y * mag)) - out_yz = np.zeros((y * mag, z * mag, x * mag)) - - # Loop for image slices - # 1st orientation - with torch.no_grad(): # Do not update gradients - - # 1st orientation - for slice_idx in tqdm(range(data_xy.shape[2]), desc='Running inference, XY', disable=not args.verbose): - out_xy[:, :, slice_idx] = inference(model, args, config, data_xy[:, :, slice_idx, :], - weight=args.weight, step=args.step) - - # 2nd and 3rd orientation - if args.avg_planes: - for slice_idx in tqdm(range(data_xz.shape[2]), desc='Running inference, XZ', disable=not args.verbose): - out_xz[:, :, slice_idx] = inference(model, args, config, data_xz[:, :, slice_idx, :], - weight=args.weight, step=args.step) - for slice_idx in tqdm(range(data_yz.shape[2]), desc='Running inference, YZ', disable=not args.verbose): - out_yz[:, :, slice_idx] = inference(model, args, config, data_yz[:, :, slice_idx, :], - weight=args.weight, step=args.step) - - # Average probability maps - if args.avg_planes: - mask_avg = ((out_xy + np.transpose(out_xz, (0, 2, 1)) + np.transpose(out_yz, (2, 0, 1))) / 3) - - # Free memory - del out_xz, out_yz - else: - mask_avg = out_xy - - # Scale the dynamic range - # mask_avg -= np.min(mask_avg) - # mask_avg /= np.max(mask_avg) - - mask_avg = (mask_avg * 255).astype('uint8') - - # Save predicted full mask - save(str(args.save_dir / (sample[:-3] + f'_{snap}')), sample[:-3], mask_avg, dtype=args.dtype) - - print_orthogonal(mask_avg, invert=True, res=0.2 / 4, title='Output', cbar=True, - savepath=str(args.visualizations / (sample + f'_{snap}_prediction.png')), - scale_factor=1000) - - dur = time() - start - print(f'Inference completed in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.') \ No newline at end of file diff --git a/scripts/inference_tiles_large_2D.py b/scripts/inference_tiles_large_2D.py deleted file mode 100644 index d567357..0000000 --- 
a/scripts/inference_tiles_large_2D.py +++ /dev/null @@ -1,179 +0,0 @@ -import cv2 -import numpy as np -import os -from pathlib import Path -import argparse -import matplotlib.pyplot as plt -import h5py -import dill -import torch -import torch.nn as nn -import yaml -from time import time -from tqdm import tqdm -from glob import glob -from scipy.ndimage import zoom -from skimage.transform import resize -from omegaconf import OmegaConf - -from bone_enhance.utilities import load, save, print_orthogonal, render_volume, calculate_mean_std -from bone_enhance.inference import InferenceModel, inference, largest_object, load_models -from bone_enhance.models import ConvNet, EnhanceNet - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -def main(args, config, args_experiment, sample_id=None, render=False, res=0.2, ds=False): - #Save path - args.save_dir.mkdir(exist_ok=True) - (args.save_dir / 'visualizations').mkdir(exist_ok=True) - snapshot = args.snapshot.name - - # Load models - models = glob(str(args.snapshot) + '/*fold_[0-9]_*.pth') - models.sort() - device = 'cuda' # Use the second GPU for inference - - crop = config.training.crop_small - config.training.bs = args.bs - mag = config.training.magnification - if config.training.crossmodality: - cm = 'cm' - else: - cm = 'ds' - mean_std_path = args.snapshot.parent / f'mean_std_{crop}_{cm}.pth' - tmp = torch.load(mean_std_path) - mean, std = tmp['mean'], tmp['std'] - - # List the models - model_list = load_models(str(args.snapshot), config, n_gpus=args_experiment.gpus) - - model = InferenceModel(model_list).to(device) - model.eval() - print(f'Found {len(model_list)} models.') - - # Load samples - # samples = [os.path.basename(x) for x in glob(str(args.dataset_root / '*XZ'))] # Load with specific name - samples = os.listdir(args.dataset_root) - samples.sort() - if sample_id is not None: - samples = [samples[id] for id in [sample_id]] # Get intended samples from list - - # Skip the completed samples - if args.completed > 0: - samples = samples[args.completed:] - - # Main loop - for idx, sample in enumerate(samples): - print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}') - - # Load image stacks - if sample.endswith('.h5'): - with h5py.File(str(args.dataset_root / sample), 'r') as f: - data_xy = f['data'][:] - else: - data_xy, files = load(str(args.dataset_root / sample), rgb=False, axis=(1, 2, 0), dicom=args.dicom) - - if ds: - factor = (data_xy.shape[0] // mag, data_xy.shape[1] // mag, data_xy.shape[2] // mag) - data_xy = resize(data_xy, factor, order=0, anti_aliasing=True, preserve_range=True) - - if len(data_xy.shape) != 4: - data_xy = np.expand_dims(data_xy, -1) - if config.training.rgb: - data_xy = np.repeat(data_xy, 3, axis=-1) - x, y, z, ch = data_xy.shape - - - print_orthogonal(data_xy[:, :, :, 0], invert=True, res=res, title='Input', cbar=True, - savepath=str(args.visualizations / (sample[:-3] + f'_{snapshot}_input.png')), scale_factor=100) - - # Calculate mean and std from the sample - if args.calculate_mean_std: - mean, std = calculate_mean_std(data_xy, config.training.rgb) - - - # Output shape - prediction = np.zeros((x * mag, y * mag, z)) - - # Loop for image slices - # 1st orientation - with torch.no_grad(): # Do not update gradients - - # 1st orientation - for slice_idx in tqdm(range(data_xy.shape[2]), desc='Running inference, XY'): - prediction[:, :, slice_idx] = inference(model, args, config, data_xy[:, :, slice_idx, :], - weight=args.weight, step=args.step, mean=mean, std=std) - - # Scale the dynamic range - 
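# Caveat for the args.scale branch below (the identical pattern appears in
# inference_tiles_3d_large_old.py earlier in this diff): pred_max is captured
# before the minimum is subtracted, so the shifted volume is divided by a stale
# maximum. The result under-uses [0, 1] when the minimum is positive and can
# exceed 1 (overflowing the later uint8 cast) when the prediction contains
# negative values. A corrected sketch of the intended normalisation:
#
#     if args.scale:
#         prediction -= np.min(prediction)
#         prediction /= np.max(prediction)  # maximum of the shifted volume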
pred_max = np.max(prediction) - if args.scale: - prediction -= np.min(prediction) - prediction /= pred_max - elif pred_max > 1: - print(f'Maximum value {pred_max} will be scaled to one') - prediction /= pred_max - - prediction = (prediction * 255).astype('uint8') - - # Save predicted full mask - save(str(args.save_dir / sample), sample, prediction, dtype=args.dtype) - - print_orthogonal(prediction, invert=True, res=res / 4, title='Output', cbar=True, - savepath=str(args.visualizations / (sample[:-3] + f'_{snapshot}_prediction.png')), - scale_factor=100) - - dur = time() - start - print(f'Inference completed in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.') - - -if __name__ == "__main__": - start = time() - - snap_path = '../../Workdir/phantom_experiments' - snaps = os.listdir(snap_path) - snaps.sort() - snaps = [snap for snap in snaps if os.path.isdir(os.path.join(snap_path, snap))] - #snaps = snaps[2:] - #snaps = ['2021_05_27_08_56_20_2D_perceptual_tv_IVD_4x_pretrained_seed42'] - - for snap_id in range(len(snaps)): - - snap = snaps[snap_id] - print(f'Calculating inference for snapshot: {snap} {snap_id+1}/{len(snaps)}') - - parser = argparse.ArgumentParser() - parser.add_argument('--dataset_root', type=Path, default='../../Data/Fantomi/') - parser.add_argument('--save_dir', type=Path, - default=f'../../Data/predictions_3D_clinical/phantom_experiments/{snap}_single') - parser.add_argument('--visualizations', type=Path, - default=f'../../Data/predictions_3D_clinical/phantom_experiments/visualization') - parser.add_argument('--bs', type=int, default=64) - parser.add_argument('--step', type=int, default=3) - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--calculate_mean_std', type=bool, default=True) - parser.add_argument('--scale', type=bool, default=True) - parser.add_argument('--dicom', type=bool, default=False, help='Is DICOM format used for loading?') - parser.add_argument('--weight', type=str, choices=['gaussian', 'mean', 'pyramid'], default='gaussian') - parser.add_argument('--completed', type=int, default=0) - parser.add_argument('--sample_id', type=list, default=7, help='Process specific samples unless None.') - parser.add_argument('--snapshot', type=Path, - default=os.path.join(snap_path, snap)) - parser.add_argument('--dtype', type=str, choices=['.bmp', '.png', '.tif'], default='.bmp') - args = parser.parse_args() - #subdir = 'NN_prediction' # 'NN_prediction' - - # Load snapshot configuration - with open(args.snapshot / 'config.yml', 'r') as f: - config = yaml.load(f, Loader=yaml.Loader) - config = OmegaConf.create(config) - - with open(args.snapshot / 'args.dill', 'rb') as f: - args_experiment = dill.load(f) - - args.save_dir.parent.mkdir(exist_ok=True) - args.save_dir.mkdir(exist_ok=True) - args.visualizations.mkdir(exist_ok=True) - - main(args, config, args_experiment, sample_id=args.sample_id, res=0.2) diff --git a/scripts/inference_tiles_pseudo3d.py b/scripts/inference_tiles_pseudo3d.py deleted file mode 100644 index e641279..0000000 --- a/scripts/inference_tiles_pseudo3d.py +++ /dev/null @@ -1,173 +0,0 @@ -import cv2 -import numpy as np -import os -from pathlib import Path -import argparse -import matplotlib.pyplot as plt -import h5py -import dill -import torch -import torch.nn as nn -import yaml -from time import time -from tqdm import tqdm -from glob import glob -from scipy.ndimage import zoom -from skimage.transform import resize -from omegaconf import OmegaConf - -from bone_enhance.utilities import load, save, 
print_orthogonal, render_volume -from bone_enhance.inference import InferenceModel, inference, largest_object, load_models -from bone_enhance.models import ConvNet, EnhanceNet - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -if __name__ == "__main__": - start = time() - - snap = 'dios-erc-gpu_2020_10_12_09_40_33_perceptualnet_newsplit' - #snap = 'dios-erc-gpu_2020_10_19_14_09_24_3D_perceptualnet' - #snap = 'dios-erc-gpu_2020_09_30_14_14_42_perceptualnet_noscaling_3x3_cm_curated_trainloss' - snap = '2020_12_15_10_28_57_2D_perceptualnet_ds_16' # Latest 2D model with fixes, only 1 fold - snap = '2021_01_08_09_49_45_2D_perceptualnet_ds_16' # 2D model, 3 working folds - snap = '2021_03_31_22_06_00_2D_perceptualnet_cm' # 2D model, trained with CBCT images - ds = True - - parser = argparse.ArgumentParser() - parser.add_argument('--dataset_root', type=Path, default='../../Data/Test set (KP02)/target_3d') - parser.add_argument('--save_dir', type=Path, default=f'../../Data/Test set (KP02)/predictions/{snap}') - parser.add_argument('--subdir', type=Path, choices=['NN_prediction', ''], default='') - parser.add_argument('--bs', type=int, default=12) - parser.add_argument('--step', type=int, default=3) - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--weight', type=str, choices=['gaussian', 'mean', 'pyramid'], default='gaussian') - parser.add_argument('--completed', type=int, default=0) - parser.add_argument('--avg_planes', type=bool, default=True) - parser.add_argument('--snapshot', type=Path, default=f'../../Workdir/snapshots/{snap}') - parser.add_argument('--dtype', type=str, choices=['.bmp', '.png', '.tif'], default='.bmp') - args = parser.parse_args() - - # Load snapshot configuration - with open(args.snapshot / 'config.yml', 'r') as f: - config = yaml.load(f, Loader=yaml.Loader) - config = OmegaConf.create(config) - - with open(args.snapshot / 'args.dill', 'rb') as f: - args_experiment = dill.load(f) - - with open(args.snapshot / 'split_config.dill', 'rb') as f: - split_config = dill.load(f) - args.save_dir.mkdir(exist_ok=True) - (args.save_dir / 'visualizations').mkdir(exist_ok=True) - - # Load models - models = glob(str(args.snapshot) + '/*fold_[0-9]_*.pth') - #models = glob(str(args.snapshot) + '/*fold_3_*.pth') - models.sort() - #device = auto_detect_device() - device = 'cuda' # Use the second GPU for inference - - crop = config.training.crop_small - config.training.bs = args.bs - mag = config.training.magnification - mean_std_path = args.snapshot.parent / f"mean_std_{crop[0]}x{crop[1]}.pth" - tmp = torch.load(mean_std_path) - mean, std = tmp['mean'], tmp['std'] - - # List the models - model_list = load_models(str(args.snapshot), config, n_gpus=args_experiment.gpus) - - model = InferenceModel(model_list).to(device) - model.eval() - print(f'Found {len(model_list)} models.') - - # Load samples - samples = os.listdir(args.dataset_root) - samples.sort() - - # Skip the completed samples - if args.completed > 0: - samples = samples[args.completed:] - - # Main loop - for idx, sample in enumerate(samples): - print(f'==> Processing sample {idx + 1} of {len(samples)}: {sample}') - - # Load image stacks - if sample.endswith('.h5'): # Load with h5py - with h5py.File(str(args.dataset_root / sample), 'r') as f: - data_xy = f['data'][:] - else: # Load image series - data_xy, files = load(str(args.dataset_root / sample), rgb=True, axis=(1, 2, 0)) - - if ds: - factor = (data_xy.shape[0] // mag, data_xy.shape[1] // mag, data_xy.shape[2] // mag) - data_xy = 
resize(data_xy, factor, order=0, anti_aliasing=True, preserve_range=True) - - if len(data_xy.shape) != 4: - data_xy = np.expand_dims(data_xy, -1) - x, y, z, ch = data_xy.shape - - print_orthogonal(data_xy[:, :, :, 0], invert=True, res=0.2, title='Input', cbar=True, - savepath=str(args.save_dir / 'visualizations' / (sample + '_input.png')), - scale_factor=1000) - - data_xz = np.transpose(data_xy, (0, 2, 1, 3)) # X-Z-Y-Ch - data_yz = np.transpose(data_xy, (1, 2, 0, 3)) # Y-Z-X-Ch - - # Interpolate 3rd dimension - data_xy = zoom(data_xy, zoom=(1, 1, config.training.magnification, 1)) - data_xz = zoom(data_xz, zoom=(1, 1, config.training.magnification, 1)) - data_yz = zoom(data_yz, zoom=(1, 1, config.training.magnification, 1)) - - # Output shape - out_xy = np.zeros((x * mag, y * mag, z * mag)) - out_xz = np.zeros((x * mag, z * mag, y * mag)) - out_yz = np.zeros((y * mag, z * mag, x * mag)) - - # Loop for image slices - # 1st orientation - with torch.no_grad(): # Do not update gradients - - for slice_idx in tqdm(range(data_xy.shape[2]), desc='Running inference, XY'): - out_xy[:, :, slice_idx] = inference(model, args, config, data_xy[:, :, slice_idx, :], - weight=args.weight, step=args.step) - - # 2nd and 3rd orientation - if args.avg_planes: - for slice_idx in tqdm(range(data_xz.shape[2]), desc='Running inference, XZ'): - out_xz[:, :, slice_idx] = inference(model, args, config, data_xz[:, :, slice_idx, :], - weight=args.weight, step=args.step) - for slice_idx in tqdm(range(data_yz.shape[2]), desc='Running inference, YZ'): - out_yz[:, :, slice_idx] = inference(model, args, config, data_yz[:, :, slice_idx, :], - weight=args.weight, step=args.step) - - # Average probability maps - if args.avg_planes: - #mask_avg = ((mask_xz + np.transpose(mask_yz, (0, 2, 1))) / 2) - mask_avg = ((out_xy + np.transpose(out_xz, (0, 2, 1)) + np.transpose(out_yz, (2, 0, 1))) / 3) - else: - mask_avg = out_xy - - # Free memory - del out_xz, out_yz - - # Scale the dynamic range - #mask_avg -= np.min(mask_avg) - #mask_avg /= np.max(mask_avg) - - mask_avg = (mask_avg * 255).astype('uint8') - - # Save predicted full mask - if sample.endswith('.h5'): # remove unnecessary file name extension - sample = sample[:-3] - save(str(args.save_dir / sample), sample, mask_avg, dtype=args.dtype) - - print_orthogonal(mask_avg, invert=True, res=0.2/4, title='Output', cbar=True, - savepath=str(args.save_dir / 'visualizations' / (sample + '_prediction.png')), - scale_factor=1000) - - dur = time() - start - print(f'Inference completed in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.') diff --git a/scripts/morphometric_analysis.py b/scripts/morphometric_analysis.py deleted file mode 100644 index 9d02c01..0000000 --- a/scripts/morphometric_analysis.py +++ /dev/null @@ -1,152 +0,0 @@ -""" -Calculate morphometric analysis, including the full volume. 
-""" - -import os -import h5py -from pathlib import Path -from openpyxl import load_workbook -from time import time, strftime -import pandas as pd -from tqdm import tqdm - -import numpy as np -import argparse - -from bone_enhance.utilities import load, print_orthogonal, threshold, calculate_bvtv -from bone_enhance.inference.thickness_analysis import _local_thickness - - -if __name__ == '__main__': - start = time() - - # Prediction path - path = Path('../../Data/target_1176_HR') - t = strftime(f'%Y_%m_%d_%H_%M') - savepath = f'../../Data/evaluation_oof_wacv/Results_target{t}.xlsx' - snaps = os.listdir(path) - snaps = [snap for snap in snaps if os.path.isdir(os.path.join(path, snap))] - snaps = [str(path)] - - for snap in snaps: - # Remove timestamp from snapshot - if snap[:2] == '2D' or snap[:2] == '3D': - snap_short = snap - else: - snap_short = snap.split('_', 6)[-1] - - # Arguments - parser = argparse.ArgumentParser() - parser.add_argument('--save', type=Path, default=path.parent / 'masks_wacv_new' / snap_short) - #parser.add_argument('--preds', type=Path, default=path / snap) - parser.add_argument('--preds', type=Path, default=path) - parser.add_argument('--final_results', type=Path, default='../../Data/final_results.csv') - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--resolution', type=tuple, default=(50, 50, 50)) # in µm - parser.add_argument('--mode', type=str, choices=['med2d_dist3d_lth3d', 'stacked_2d', 'med2d_dist2d_lth3d'], - default='med2d_dist3d_lth3d') - parser.add_argument('--trabecular_number', type=str, choices=['3d', 'plate', 'rod'], default='rod') - parser.add_argument('--max_th', type=float, default=None) # in µm - - args = parser.parse_args() - - # Sample list - samples = os.listdir(str(args.preds)) - samples.sort() - if 'visualizations' in samples: - samples.remove('visualizations') - - # Save paths - args.save.parent.mkdir(exist_ok=True) - if args.plot: - args.save.mkdir(exist_ok=True) - (args.save / 'visualization').mkdir(exist_ok=True) - - # Table for results - results = {'Sample': [], - 'Trabecular thickness': [], 'Trabecular separation': [], 'BVTV': [], 'Trabecular number': []} - - # Loop for samples - for idx in tqdm(range(len(samples)), desc=f'Processing snapshot {snap_short}'): - time_sample = time() - sample = samples[idx] - - # Load image stacks - if samples[idx].endswith('.h5'): - with h5py.File(str(args.preds / samples[idx]), 'r') as f: - pred = f['data'][:] - else: - pred, _ = load(str(args.preds / samples[idx]), rgb=False, axis=(1, 2, 0)) - # Analysis conducted on the full volume - voi = np.ones(pred.shape) - - # Binarize - if len(np.unique(pred)) != 2: - pred, _ = threshold(pred) - - if args.plot: - print_orthogonal(pred, savepath=str(args.save / 'visualization' / (sample + '_pred.png')), res=50 / 1000) - - # # - # Morphometric analysis # - # # - - # Thickness map - th_map = _local_thickness(pred, mode=args.mode, spacing_mm=args.resolution, stack_axis=1, - thickness_max_mm=args.max_th, verbose=False) - if args.plot: - print_orthogonal(th_map, cmap='hot', res=args.resolution[0] / 1000) - - # Bone volume fraction - bvtv = calculate_bvtv(pred, voi) - - # Update results - th_map = th_map[np.nonzero(th_map)].flatten() - tb_th = np.mean(th_map) - results['Sample'].append(sample) - results['Trabecular thickness'].append(tb_th) - results['BVTV'].append(bvtv) - - # Separation map - pred = np.invert(pred) - th_map = _local_thickness(pred, mode=args.mode, spacing_mm=args.resolution, stack_axis=1, - thickness_max_mm=args.max_th, 
verbose=False) - th_map = th_map[np.nonzero(th_map)].flatten() - tb_sep = np.mean(th_map) - results['Trabecular separation'].append(tb_sep) - - # Trabecular number - if args.trabecular_number == '3d': # 3D model - results['Trabecular number'].append(1 / (tb_sep + tb_th)) - elif args.trabecular_number == 'plate': # 2D plate model - results['Trabecular number'].append(bvtv / tb_th) - elif args.trabecular_number == 'rod': # 2D cylinder rod model - results['Trabecular number'].append(np.sqrt((4 / np.pi) * bvtv) / tb_th) - else: # Append 0 for compatibility - results['Trabecular number'].append(0) - - # # - # Statistics # - # # - - # Remove NaNs - results['Trabecular separation'] = list(np.nan_to_num(results['Trabecular separation'])) - results['Trabecular thickness'] = list(np.nan_to_num(results['Trabecular thickness'])) - results['Trabecular number'] = list(np.nan_to_num(results['Trabecular number'])) - - # Save morphometric results to excel - # Load existing file - if os.path.isfile(savepath): - book = load_workbook(savepath) - writer = pd.ExcelWriter(savepath, engine='openpyxl', mode='a') - writer.book = book - else: - writer = pd.ExcelWriter(savepath, engine='openpyxl') - # Append new results - df1 = pd.DataFrame(results) - df1.to_excel(writer, sheet_name=snap_short) - writer.save() - - dur = time() - start - completed = strftime(f'%Y_%m_%d_%H_%M') - print(f'Analysis completed in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds at time {completed}.') diff --git a/scripts/morphometric_analysis_voi.py b/scripts/morphometric_analysis_voi.py deleted file mode 100644 index 1123a54..0000000 --- a/scripts/morphometric_analysis_voi.py +++ /dev/null @@ -1,193 +0,0 @@ -""" -Calculate morphometric analysis, applying a specific volume-of-interest. 
-""" - -import os -from pathlib import Path -from openpyxl import load_workbook -from time import time, strftime -from scipy.stats import pearsonr -import pandas as pd -from tqdm import tqdm - -import numpy as np -import argparse - -from bone_enhance.utilities import load, save, print_orthogonal, threshold, calculate_bvtv -from bone_enhance.inference.thickness_analysis import _local_thickness - - -if __name__ == '__main__': - start = time() - - # Prediction path - path = Path('../../Data/Test_set_(full)/input_interpolated') - t = strftime(f'%Y_%m_%d_%H_%M') - savepath = f'../../Data/Test_set_(full)/masks_wacv_new/Results_conventional{t}.xlsx' - snaps = os.listdir(path) - snaps = [snap for snap in snaps if os.path.isdir(os.path.join(path, snap))] - - correlations = {'Snapshot': [], 'BVTV': [], 'Tb.Th': [], 'Tb.Sp': [], 'Tb.N': [], - 'BVTV (p)': [], 'Tb.Th (p)': [], 'Tb.Sp (p)': [], 'Tb.N (p)': []} - for snap in snaps: - # Remove timestamp from snapshot - if snap[:2] == '2D' or snap[:2] == '3D': - snap_short = snap - else: - snap_short = snap.split('_', 6)[-1] - - # Arguments - parser = argparse.ArgumentParser() - parser.add_argument('--masks', type=Path, default=path.parent / 'trabecular_VOI') - parser.add_argument('--save', type=Path, default=path.parent / 'masks_wacv_new' / snap_short) - parser.add_argument('--preds', type=Path, default=path / snap) - parser.add_argument('--ground_truth', type=Path, default='../../Data/uCT_parameters.csv') - parser.add_argument('--final_results', type=Path, default='../../Data/final_results.csv') - parser.add_argument('--plot', type=bool, default=False) - parser.add_argument('--save_masks', type=bool, default=True) - parser.add_argument('--batch_id', type=int, default=None) - parser.add_argument('--resolution', type=tuple, default=(50, 50, 50)) # in µm - parser.add_argument('--mode', type=str, choices=['med2d_dist3d_lth3d', 'stacked_2d', 'med2d_dist2d_lth3d'], - default='med2d_dist3d_lth3d') - parser.add_argument('--trabecular_number', type=str, choices=['3d', 'plate', 'rod'], default='rod') - parser.add_argument('--max_th', type=float, default=None) # in µm - - args = parser.parse_args() - - # Sample list - samples = os.listdir(args.masks) - samples.sort() - if 'visualization' in samples: - samples.remove('visualization') - if args.batch_id is not None: - samples = [samples[args.batch_id]] - - samples_pred = os.listdir(str(args.preds)) - samples_pred.sort() - if 'visualizations' in samples_pred: - samples_pred.remove('visualizations') - - # Save paths - args.save.parent.mkdir(exist_ok=True) - if args.plot or args.save_masks: - args.save.mkdir(exist_ok=True) - (args.save / 'visualization').mkdir(exist_ok=True) - - # Table for results - results = {'Sample': [], - 'Trabecular thickness': [], 'Trabecular separation': [], 'BVTV': [], 'Trabecular number': []} - - # Load ground truth values - target = pd.read_csv(args.ground_truth) - - # Loop for samples - for idx in tqdm(range(len(samples)), desc=f'Processing snapshot {snap_short}'): - time_sample = time() - sample = samples[idx] - sample_pred = samples_pred[idx] - - # Load prediction and volume-of-interest - pred, _ = load(str(args.preds / sample_pred / 'conventional_segmentation_gray'), axis=(1, 2, 0,)) - voi, files = load(str(args.masks / sample), axis=(1, 2, 0,)) - - if len(np.unique(pred)) != 2: - pred, _ = threshold(pred) - - if args.plot: - print_orthogonal(pred, savepath=str(args.save / 'visualization' / (sample + '_pred.png')), res=50 / 1000) - - # Apply volume-of-interest - pred = 
np.logical_and(pred, voi).astype(np.uint8) * 255 - - # Save binary mask with VOI applied - if args.save_masks: - save(str(args.save / sample), Path(sample).stem, pred, dtype='.bmp', verbose=False) - - if args.plot: - print_orthogonal(pred, savepath=str(args.save / 'visualization' / (sample + '_voi.png')), res=50 / 1000) - - # # - # Morphometric analysis # - # # - - # Thickness map - th_map = _local_thickness(pred, mode=args.mode, spacing_mm=args.resolution, stack_axis=1, - thickness_max_mm=args.max_th, verbose=False) - if args.plot: - print_orthogonal(th_map, cmap='hot', res=args.resolution[0] / 1000) - - # Bone volume fraction - bvtv = calculate_bvtv(pred, voi) / 100 - - # Update results - th_map = th_map[np.nonzero(th_map)].flatten() - tb_th = np.mean(th_map) - results['Sample'].append(sample) - results['Trabecular thickness'].append(tb_th) - results['BVTV'].append(bvtv * 100) - - # Separation map - pred = np.logical_and(np.invert(pred), voi).astype(np.uint8) * 255 - th_map = _local_thickness(pred, mode=args.mode, spacing_mm=args.resolution, stack_axis=1, - thickness_max_mm=args.max_th, verbose=False) - th_map = th_map[np.nonzero(th_map)].flatten() - tb_sep = np.mean(th_map) - results['Trabecular separation'].append(tb_sep) - - # Trabecular number - if args.trabecular_number == '3d': # 3D model - results['Trabecular number'].append(1 / (tb_sep + tb_th)) - elif args.trabecular_number == 'plate': # 2D plate model - results['Trabecular number'].append(bvtv / tb_th) - elif args.trabecular_number == 'rod': # 2D cylinder rod model - results['Trabecular number'].append(np.sqrt((4 / np.pi) * bvtv) / tb_th) - else: # Append 0 for compatibility - results['Trabecular number'].append(0) - - # # - # Statistics # - # # - - # Pearson correlation - - # Remove NaNs - results['Trabecular separation'] = list(np.nan_to_num(results['Trabecular separation'])) - results['Trabecular thickness'] = list(np.nan_to_num(results['Trabecular thickness'])) - results['Trabecular number'] = list(np.nan_to_num(results['Trabecular number'])) - - # Calculate correlations - correlations['Snapshot'].append(snap_short) - correlations['BVTV'].append(pearsonr(results['BVTV'], target['BVTV'].values)[0]) - correlations['BVTV (p)'].append(pearsonr(results['BVTV'], target['BVTV'].values)[1]) - correlations['Tb.Th'].append(pearsonr(results['Trabecular thickness'], target['Tb.Th'].values)[0]) - correlations['Tb.Th (p)'].append(pearsonr(results['Trabecular thickness'], target['Tb.Th'].values)[1]) - correlations['Tb.Sp'].append(pearsonr(results['Trabecular separation'], target['Tb.Sp'].values)[0]) - correlations['Tb.Sp (p)'].append(pearsonr(results['Trabecular separation'], target['Tb.Sp'].values)[1]) - correlations['Tb.N'].append(pearsonr(results['Trabecular number'], target['Tb.N'].values)[0]) - correlations['Tb.N (p)'].append(pearsonr(results['Trabecular number'], target['Tb.N'].values)[1]) - - # Save morphometric results to excel - # Load existing file - if os.path.isfile(savepath): - book = load_workbook(savepath) - writer = pd.ExcelWriter(savepath, engine='openpyxl', mode='a') - writer.book = book - else: - writer = pd.ExcelWriter(savepath, engine='openpyxl') - # Append new results - df1 = pd.DataFrame(results) - df1.to_excel(writer, sheet_name=snap_short) - writer.save() - - # Save correlation results to excel - # Load existing file - book = load_workbook(savepath) - writer = pd.ExcelWriter(savepath, engine='openpyxl', mode='a') - writer.book = book - df = pd.DataFrame(correlations) - df.to_excel(writer, 
sheet_name='Correlations') - writer.save() - - dur = time() - start - completed = strftime(f'%Y_%m_%d_%H_%M') - print(f'Analysis completed in {(dur % 3600) // 60} minutes, {dur % 60} seconds at time {completed}.') diff --git a/scripts/randomize_dataset.py b/scripts/randomize_dataset.py deleted file mode 100644 index 4d24cd8..0000000 --- a/scripts/randomize_dataset.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -import numpy as np -import pandas as pd -from pathlib import Path -import h5py -from bone_enhance.training.session import init_experiment -from bone_enhance.utilities.main import load, save, print_orthogonal -from skimage.transform import resize -from scipy.ndimage import zoom - -if __name__ == "__main__": - # Initialize experiment - args, config, _, device = init_experiment() - - - images_loc = Path(f'/media/santeri/Transcend/Super-resolution png') - #images_loc = Path(f'/media/santeri/data/BoneEnhance/Data/dental') - table_path = Path(f'/media/santeri/Transcend/Randomize_patients.xlsx') - - images_save = Path(f'/media/santeri/Transcend/randomized_dataset') - - images_save.mkdir(exist_ok=True) - - resample = False - normalize = False - factor = 4 - sigma = 1 - dtype = '.dcm' - k = 3 - hdf5 = False - - table_keys = pd.read_excel(table_path, engine='openpyxl') - table_keys = table_keys.iloc[:36, :3].values.astype('uint32') - - # Resample large number of slices - models = os.listdir(images_loc) - for table_idx, idx_random in enumerate(table_keys[:, 2]): - model = models[table_keys[table_idx, 1] - 1] - - samples = os.listdir(images_loc / model) - #samples = [name for name in samples if os.path.isdir(os.path.join(images_loc, name))] - samples.sort() - #samples = [samples[6]] - #samples = samples[16:25] - if 'visualizations' in samples: - samples.remove('visualizations') - - sample = samples[table_keys[table_idx, 0] - 1] - print(f'Processing sample: {sample}, model {model}') - #try: - - #randomized_index = np.where((table_keys[:, :2] == (1, 1)).all(axis=1)) - #randomized_index = table_keys[randomized_index, 2][0][0] - - data, files = load(str(images_loc / model / sample), rgb=False, axis=(1, 2, 0))#, dicom=True) - - # Interpolate - #data = zoom(data, (factor, factor, factor), order=3) # Tricubic - - save(str(images_save / f'CBCT_{idx_random}'), f'CBCT_{idx_random}', data, dtype=dtype) - - #except (ValueError, FileNotFoundError): - # print(f'Error in sample {sample}') - # continue diff --git a/scripts/rename_slices.py b/scripts/rename_slices.py deleted file mode 100644 index bb222b0..0000000 --- a/scripts/rename_slices.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -from pathlib import Path -from bone_enhance.training.session import init_experiment - -if __name__ == "__main__": - # Initialize experiment - args, _, _, _ = init_experiment() - base_path = args.data_location - images_loc = base_path / 'Test set (KP02)' / 'target' - #images_loc = base_path / 'input' - - # List files - samples = os.listdir(images_loc) - samples.sort() - for sample in samples: - im_path = images_loc / Path(sample) - if '_Rec' in str(im_path): - os.rename(str(im_path), str(im_path)[:-4]) - images = list(map(lambda x: x, im_path.glob('**/*[0-9].[pb][nm][gp]'))) - images.sort() - for slice in range(len(images)): - # Image - new_name = images_loc / sample / Path(sample + f'_{str(slice).zfill(8)}{str(images[slice])[-4:]}') - os.rename(images[slice], new_name) diff --git a/scripts/resample_data.py b/scripts/resample_data.py deleted file mode 100644 index b7af65c..0000000 --- a/scripts/resample_data.py +++ /dev/null @@ 
-1,51 +0,0 @@ -import os -from pathlib import Path -from bone_enhance.training.session import init_experiment -from bone_enhance.utilities.main import load, save, print_orthogonal -from scipy.ndimage import zoom - -if __name__ == "__main__": - # Initialize experiment - args, config, _, device = init_experiment() - #images_loc = Path('/media/dios/kaappi/Sakke/Saskatoon/µCT/Recs_bone') - images_loc = Path('/media/santeri/data/BoneEnhance/Data/input_original') - - images_save = Path('/media/santeri/data/BoneEnhance/Data/input_coronal') - - images_save.mkdir(exist_ok=True) - - subdir = '' - resample = True - #factor = 200/2.75 - factor = 1 - #n_slices = 100 - - # Resample large number of slices - samples = os.listdir(images_loc) - samples = [name for name in samples if os.path.isdir(os.path.join(images_loc, name))] - samples.sort() - #samples = samples[25:] - for sample in samples: - print(f'Processing sample: {sample}') - try: - if resample: # Resample slices - im_path = images_loc / sample - - data, _ = load(im_path, axis=(0, 1, 2)) #axis=(1, 2, 0)) - - if factor != 1: - data = zoom(data, (1, 1, 1 / factor), order=0) # nearest interpolation - #print_orthogonal(data_resampled) - - save(str(images_save / sample), sample + '_cor', data[:, :, :], dtype='.bmp') - else: # Move segmented samples to training data - im_path = str(images_loc / sample) - files = os.listdir(im_path) - if subdir in files: - data, _ = load(im_path, axis=(1, 2, 0)) - - save(str(images_save / sample), sample + '_cor', data, dtype='.bmp') - - except ValueError: - print(f'Error in sample {sample}') - continue diff --git a/scripts/resample_exact.py b/scripts/resample_exact.py deleted file mode 100644 index 3c94abf..0000000 --- a/scripts/resample_exact.py +++ /dev/null @@ -1,45 +0,0 @@ -import cv2 -import os -import numpy as np -from pathlib import Path -from bone_enhance.training.session import init_experiment -from bone_enhance.utilities.main import load, save, print_orthogonal -from scipy.ndimage import zoom -from PIL import Image - -if __name__ == "__main__": - # Initialize experiment - #images_loc = Path('/media/dios/kaappi/Sakke/Saskatoon/µCT/Recs_bone') - images_loc = Path('/media/santeri/data/BoneEnhance/Data/target_binary') - input_loc = Path('/media/santeri/data/BoneEnhance/Data/input_original') - - images_save = Path('/media/santeri/data/BoneEnhance/Data/target_binary_resampled') - images_save.mkdir(exist_ok=True) - - # List samples - samples = os.listdir(images_loc) - samples = [name for name in samples if os.path.isdir(os.path.join(images_loc, name))] - samples_input = os.listdir(input_loc) - samples_input = [name for name in samples_input if os.path.isdir(os.path.join(input_loc, name))] - assert len(samples) == len(samples_input) - - samples.sort() - samples_input.sort() - #samples = samples[30:] - #samples_input = samples_input[30:] - for i in range(len(samples)): - print(f'Processing sample: {samples[i]}') - - input_path = input_loc / samples_input[i] - im_path = images_loc / samples[i] - - # (1, 2, 0) = Original dimension - data_input, _ = load(input_path, axis=(1, 2, 0)) # axis=(1, 2, 0)) - data, _ = load(im_path, axis=(1, 2, 0)) # axis=(1, 2, 0)) - - data_resample = np.zeros((data.shape[0], data.shape[1], data_input.shape[2])) - - for slice in range(data.shape[1]): - data_resample[:, slice, :] = cv2.resize(data[:, slice, :], (data_input.shape[2], data.shape[0])) - - save(str(images_save / samples[i]), samples[i], data_resample[:, :, :], dtype='.bmp') diff --git a/scripts/resample_exact_3D.py 
b/scripts/resample_exact_3D.py deleted file mode 100644 index 8328ffb..0000000 --- a/scripts/resample_exact_3D.py +++ /dev/null @@ -1,70 +0,0 @@ -import cv2 -import os -import numpy as np -from pathlib import Path -from bone_enhance.training.session import init_experiment -from bone_enhance.utilities.main import load, save, print_orthogonal -from skimage.transform import resize -import h5py -from scipy.ndimage import zoom -from PIL import Image - -if __name__ == "__main__": - # Magnification - mag = 4 - - # Initialize experiment - images_loc = Path('/media/dios/kaappi/Sakke/Saskatoon/µCT/Recs_bone') - input_loc = Path('/media/santeri/data/BoneEnhance/Data/input_original') - - images_save = Path(f'/media/santeri/data/BoneEnhance/Data/target_mag{mag}') - input_save = Path(f'/media/santeri/data/BoneEnhance/Data/input_mag{mag}') - images_save.mkdir(exist_ok=True) - input_save.mkdir(exist_ok=True) - - # List samples - samples = os.listdir(images_loc) - samples = [name for name in samples if os.path.isdir(os.path.join(images_loc, name))] - samples_input = os.listdir(input_loc) - samples_input = [name for name in samples_input if os.path.isdir(os.path.join(input_loc, name))] - # Check for consistency - assert len(samples) == len(samples_input) - - samples.sort() - samples_input.sort() - #samples = samples[22:] - samples_input = samples_input[:] - for i in range(len(samples)): - print(f'Processing sample: {samples[i]}') - - input_path = input_loc / samples_input[i] - im_path = images_loc / samples[i] - - # (1, 2, 0) = Original dimension - data_input, _ = load(input_path, axis=(1, 2, 0)) - data, _ = load(im_path, axis=(1, 2, 0)) - - print_orthogonal(data_input) - print_orthogonal(data) - - factor = (data.shape[0] * mag // data_input.shape[0], - data.shape[1] * mag // data_input.shape[1], - data.shape[2] * mag // data_input.shape[2]) - - factor = (data_input.shape[0] * mag, - data_input.shape[1] * mag, - data_input.shape[2] * mag) - print(factor) - - # Gaussian blur antialiasing and preserve 8-bit range - data = resize(data, factor, order=0, anti_aliasing=True, preserve_range=True) - - # Save target to hdf5 - fname = str(images_save / f'{samples[i]}.h5') - with h5py.File(fname, 'w') as f: - f.create_dataset('data', data=data) - - # Save input to hdf5 - fname = str(input_save / f'{samples_input[i]}.h5') - with h5py.File(fname, 'w') as f: - f.create_dataset('data', data=data_input) diff --git a/scripts/sinogram_visualization.py b/scripts/sinogram_visualization.py deleted file mode 100644 index 92729eb..0000000 --- a/scripts/sinogram_visualization.py +++ /dev/null @@ -1,176 +0,0 @@ -import os -import numpy as np -from pathlib import Path -import cv2 -import h5py -import matplotlib.pyplot as plt -import matplotlib.animation as animation -from bone_enhance.training.session import init_experiment -from bone_enhance.utilities.main import load, save, print_orthogonal, read_image_gray -from scipy.ndimage import zoom, median_filter -from skimage.transform import resize -from imageio import imread -from astra.data2d import create, get -from astra import astra_dict, algorithm -from astra.creators import create_proj_geom, create_backprojection3d_gpu, create_vol_geom, create_sino3d_gpu, create_backprojection, create_projector, create_sino - - - -if __name__ == "__main__": - images_rec = Path('/media/santeri/Transcend/train_data/reconstruction/KP03-L6-4MD2') - images_sino = Path('/media/santeri/Transcend/train_data/sinograms_old/KP03-L6-4MD2') - #images_sino = Path('/media/santeri/Transcend/DL reconstruction 
data/KP03-L6-4MD2/') - - images_save = Path('/media/santeri/Transcend/train_data/results') - images_save.mkdir(exist_ok=True) - - # Imaging parameters - rows = 1344 - cols = 2016 - - cols = 1364 - - pixel_size = 3.2 / 1000 # µm - obj_source = 48.31606 # mm - det_source = 271.75368 - - pixel_size = 1.5 - obj_source = 2000 # mm - det_source = 2600 - - num_of_projections = 2000 - #num_of_projections = 180 - #angles = np.linspace(0, 2 * np.pi, num=num_of_projections, endpoint=False) - angles = np.linspace(0, np.pi, num=num_of_projections, endpoint=False) - - create_sinogram = True - anim = False - - - #data, files = load(str(images_rec), rgb=False, axis=(1, 2, 0)) - data = imread(str(images_sino / 'KP03-L6-4MD2_00000426.tif')).astype(float) - data_ref = read_image_gray(images_rec, 'KP03-L6-4MD2__rec00000426.bmp')[:1364, :1364] - # Scale the 16-bit projection - data /= 255 - #vol = create_vol_geom(rows, rows) - - if create_sinogram: - # 2D reconstruction geometry - geometry_vol = create_vol_geom(cols, cols) - id_vol = create('-vol', geometry_vol, data=data_ref) - - # 2D projection geometry - geometry = create_proj_geom('fanflat', pixel_size, cols, angles, det_source, det_source - obj_source) - #geometry = create_proj_geom('parallel', pixel_size, cols, angles, det_source, det_source - obj_source) - - geom_id = create_projector('strip_fanflat', geometry, geometry_vol) - #geom_id = create_projector('line', geometry, geometry_vol) - id, sinogram = create_sino(data_ref, geom_id) - else: - - # 2D projection geometry - #geometry = create_proj_geom('fanflat', pixel_size, cols, angles, det_source, det_source - obj_source) - geometry = create_proj_geom('fanflat', pixel_size, cols, angles, det_source, 0) - id = create('-sino', geometry, data.transpose()) - - # 2D reconstruction geometry - geometry_vol = create_vol_geom(cols, cols) - id_vol = create('-vol', geometry_vol, data=0) - - # Algorithm - alg = astra_dict('FBP_CUDA') - alg['ProjectionDataId'] = id - alg['ReconstructionDataId'] = id_vol - alg_id = algorithm.create(alg) - - # Run recon - algorithm.run(alg_id) - rec = get(id_vol) - - # scale - rec[rec < 0] = 0 - rec /= np.max(rec) - rec = np.round(rec * 255).astype(np.uint8) - - #geometry = create_proj_geom('cone', pixel_size, pixel_size, rows, cols, angles, det_source, det_source - obj_source) - #id = create_projector('linear', geometry, vol) - #rec = create_backprojection(data, id) - - #plt.imshow(data, cmap='gray') - #plt.show() - - if anim: - fig, ax = plt.subplots() - - line, = ax.plot(sinogram[0, :], label='0$^\circ$') - line.set_color('black') - plt.rcParams['font.family'] = 'serif' - plt.rcParams['font.size'] = 24 - plt.xlabel('Detector') - plt.ylabel('Intensity') - plt.ylim([0, 1.1 * np.max(sinogram)]) - plt.xticks([]) - plt.yticks([]) - L = plt.legend(loc='upper left') - - def animate(i): - label = '%d$^\circ$' % (i) - line.set_ydata(sinogram[i, :]) # update the data. - L.get_texts()[0].set_text(label) - return line, - - - ani = animation.FuncAnimation( - fig, animate, interval=1, blit=True, save_count=num_of_projections) - - # To save the animation, use e.g. 
- - ani.save('/home/santeri/Astra_figure/sinogram5.mp4', fps=15, writer='imagemagick') - # - # or - # - #writer = animation.FFMpegWriter( - # fps=6, metadata=dict(artist='Santeri Rytky'), bitrate=1800) - #ani.save("/home/santeri/Astra_figure/sinogram.mp4", writer=writer) - - if create_sinogram: - fig = plt.figure(figsize=(4, 5)) - #plt.tight_layout() - plt.imshow(sinogram, cmap='gray') # , aspect='auto') - plt.rcParams['font.family'] = 'serif' - plt.rcParams['font.size'] = 24 - n_proj = num_of_projections # renamed: rebinding 'np' here would shadow numpy and break np.sum() below - #plt.yticks([0, n_proj // 4 - 1, n_proj // 2 - 1, n_proj * 3 // 4 - 1, n_proj - 1], [0, 90, 180, 270, 360]) - plt.yticks([0, n_proj // 4 - 1, n_proj // 2 - 1, n_proj * 3 // 4 - 1, n_proj - 1], ['0$^\circ$', '45$^\circ$', '90$^\circ$', '135$^\circ$', '180$^\circ$']) - plt.xlabel('Detector') - plt.ylabel('Angle') - plt.xticks([]) - plt.savefig('/home/santeri/Astra_figure/sino_fan.tif', dpi=300) - plt.show() - - # Projection plots (NB: these degree labels assume the commented 0-2pi angle range; with the active 0-pi range, index n_proj // 4 corresponds to 45 degrees) - - plt.plot(sinogram[0, :], '--', label='0 degrees') - plt.xticks([]) - plt.yticks([]) - plt.plot(sinogram[num_of_projections // 4, :], '-', label='90 degrees') - plt.plot(sinogram[num_of_projections // 8, :], ':', label='45 degrees') - plt.plot(sinogram[num_of_projections // 8 * 3, :], ':', label='135 degrees') - plt.legend() - plt.show() - - # Summed image - plt.plot(np.sum(data_ref, 0), '--', label='0 degrees') - plt.plot(np.sum(data_ref, 1), '-', label='90 degrees') - plt.title('Sum image') - plt.legend() - plt.show() - else: - plt.imshow(rec, cmap='gray') - plt.show() - plt.imshow(data_ref, cmap='gray') - plt.show() - - - - diff --git a/scripts/train_autoencoder.py b/scripts/train_autoencoder.py deleted file mode 100644 index 5acc881..0000000 --- a/scripts/train_autoencoder.py +++ /dev/null @@ -1,118 +0,0 @@ -from torch import optim, cuda -import torch.nn as nn -from time import time -from copy import deepcopy -import gc -from omegaconf import OmegaConf -import cv2 -from functools import partial -from torch.utils.tensorboard import SummaryWriter - -from collagen.core import Session -from collagen.strategies import Strategy -from collagen.callbacks import SamplingFreezer, ScalarMeterLogger, ImageSamplingVisualizer, RunningAverageMeter, \ - BatchProcFreezer - - -from bone_enhance.training.session import create_data_provider, init_experiment, init_callbacks, \ - save_transforms, init_loss, init_model -from bone_enhance.training import parse_grayscale, parse_autoencoder_2d, parse_autoencoder_3d -from bone_enhance.splits import build_splits -from bone_enhance.inference.pipeline_components import inference_runner_oof, evaluation_runner -from bone_enhance.models import AutoEncoder - -cv2.ocl.setUseOpenCL(False) -cv2.setNumThreads(0) - - -if __name__ == "__main__": - # Timing - start = time() - - # Initialize experiment - args_base, config_list, config_paths, device = init_experiment(experiments='../experiments/run_autoencoder') - - for experiment in range(len(config_list)): - # Current experiment - start_exp = time() - args = deepcopy(args_base) # Copy args so that they can be updated - config = OmegaConf.create(config_list[experiment]) - #config.autoencoder = True - - # Update arguments according to the configuration file - if len(config.training.crop_small) == 3: - parser = partial(parse_autoencoder_3d, config=config) - else: - parser = partial(parse_autoencoder_2d, config=config) - - # Split training folds - parser_debug = partial(parser, debug=True) # Display figures - splits_metadata = build_splits(args.data_location, args, config, parser_debug, - args.snapshots_dir, config.training.snapshot) 
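The collagen Session/Strategy wiring in the remainder of this script reduces to a standard per-fold supervised loop. As a framework-free sketch of what one fold amounts to: the batch dictionaries are assumed to follow the data_key/target_key convention from the experiment configs ('data' and 'target'); everything else is illustrative, not the repository's actual API:

import torch
import torch.nn as nn
from torch import optim

def train_fold(model, train_loader, val_loader, device, lr=1e-4, epochs=100):
    # Minimal stand-in for one collagen Session: Adam + MSE autoencoder training.
    criterion = nn.MSELoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
    for epoch in range(epochs):
        model.train()
        for batch in train_loader:
            x = batch['data'].to(device)
            y = batch['target'].to(device)
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            val = [criterion(model(b['data'].to(device)),
                             b['target'].to(device)).item() for b in val_loader]
        print(f'Epoch {epoch}: validation MSE {sum(val) / max(len(val), 1):.6f}')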
-        mean, std = splits_metadata['mean'], splits_metadata['std']
-
-        # Loss
-        loss_criterion = nn.MSELoss().to(device)
-
-        # Save transforms list
-        save_transforms(args.snapshots_dir / config.training.snapshot, config, args, mean, std)
-
-        # Training for one fold
-        for fold in range(1):
-        # for fold in range(config.training.n_folds):
-            print(f'\nTraining fold {fold}')
-            # Initialize data provider
-            data_provider = create_data_provider(args, config, parser, metadata=splits_metadata[f'fold_{fold}'],
-                                                 mean=mean, std=std)
-
-            # Initialize model
-            vol = len(config.training.crop_small) == 3
-            crop_size = tuple([crop * config.training.magnification for crop in config.training.crop_small])
-            if args.gpus > 1:
-                model = nn.DataParallel(AutoEncoder(crop_size, vol=vol, rgb=config.training.rgb)).to(device)
-            else:
-                model = AutoEncoder(crop_size, vol=vol, rgb=config.training.rgb).to(device)
-
-            # Optimizer
-            optimizer = optim.Adam(model.parameters(),
-                                   lr=config.training.lr,
-                                   # weight_decay=config.training.wd,
-                                   betas=(0.9, 0.999))
-            # Callbacks
-            train_cbs, val_cbs = init_callbacks(fold, config, args.snapshots_dir,
-                                                config.training.snapshot, model, optimizer, mean=mean, std=std)
-
-            # Initialize session
-            sessions = dict()
-            sessions['SR'] = Session(data_provider=data_provider,
-                                     train_loader_names=tuple(config.data_sampling.train.data_provider.SR.keys()),
-                                     val_loader_names=tuple(config.data_sampling.eval.data_provider.SR.keys()),
-                                     module=model, loss=loss_criterion, optimizer=optimizer,
-                                     train_callbacks=train_cbs,
-                                     val_callbacks=val_cbs)
-
-            # Run training
-            strategy = Strategy(data_provider=data_provider,
-                                data_sampling_config=config.data_sampling,
-                                strategy_config=config.strategy,
-                                sessions=sessions,
-                                n_epochs=config.training.epochs,
-                                device=device)
-            strategy.run()
-
-            # Manage memory
-            del strategy
-            del model
-            cuda.empty_cache()
-            gc.collect()
-
-        dur = time() - start_exp
-        print(f'Model {experiment + 1} trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
-
-        if config.inference.calc_inference:
-            save_dir = inference_runner_oof(args, config, splits_metadata, device)
-
-            evaluation_runner(args, config, save_dir)
-
-    dur = time() - start
-    print(f'Models trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
diff --git a/scripts/train_gan.py b/scripts/train_gan.py
deleted file mode 100644
index 7426786..0000000
--- a/scripts/train_gan.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from torch import optim
-from time import time
-from copy import deepcopy
-from omegaconf import OmegaConf
-import cv2
-from functools import partial
-from torch.nn import BCEWithLogitsLoss, L1Loss, BCELoss, MSELoss
-
-from bone_enhance.training.session import init_experiment, save_transforms, create_data_provider, init_loss
-from bone_enhance.training import parse_grayscale
-from bone_enhance.splits import build_splits
-from bone_enhance.gan import init_model_gan, init_callbacks, Trainer
-from bone_enhance.inference.pipeline_components import inference_runner_oof, evaluation_runner
-
-cv2.ocl.setUseOpenCL(False)
-cv2.setNumThreads(0)
-
-if __name__ == "__main__":
-    # Timing
-    start = time()
-
-    # Initialize experiment
-    args_base, config_list, config_paths, device = init_experiment(experiments='../experiments/run_gan')
-
-    for experiment in range(len(config_list)):
-        # Current experiment
-        start_exp = time()
-        args = deepcopy(args_base)  # Copy args so that they can be updated
-        config = OmegaConf.create(config_list[experiment])
-
-        # Update arguments according to the configuration file
-        parser = partial(parse_grayscale, config=config)
-
-        # Split training folds
-        parser_debug = partial(parser, debug=True)  # Display figures
-        splits_metadata = build_splits(args.data_location, args, config, parser_debug,
-                                       args.snapshots_dir, config.training.snapshot)
-        mean, std = splits_metadata['mean'], splits_metadata['std']
-
-        # Loss
-        criterion_GAN = MSELoss().to(device)
-        criterion_content = init_loss(config.training.loss, config, device=device)
-        criterion_pixel = L1Loss().to(device)
-        loss = {
-            'content': criterion_content,
-            'pixel': criterion_pixel,
-            'adversarial': criterion_GAN
-        }
-
-        # Save transforms list
-        save_transforms(args.snapshots_dir / config.training.snapshot, config, args, mean, std)
-
-        # Training for separate folds
-        for fold in range(config.training.n_folds):
-            print(f'\nTraining fold {fold}')
-
-            # Initialize model
-            generator, discriminator, feature_extractor = init_model_gan(config, device, args.gpus)
-
-            # Optimizers
-            optimizer_d = optim.Adam(discriminator.parameters(), lr=config.training.lr, weight_decay=config.training.wd)
-            optimizer_g = optim.Adam(generator.parameters(), lr=config.training.lr, weight_decay=config.training.wd)
-
-            # Initialize data provider
-            dataloader = create_data_provider(args, config, parser, metadata=splits_metadata[f'fold_{fold}'],
-                                              mean=mean, std=std)
-
-            # Combine callbacks into dictionary
-            callbacks = init_callbacks(fold, config, args.snapshots_dir, config.training.snapshot,
-                                       (generator, discriminator), (optimizer_g, optimizer_d), mean, std)
-            callbacks = {'train': callbacks[0], 'eval': callbacks[1]}
-            current_snapshot_dir = args.snapshots_dir / config.training.snapshot
-
-            # Set up model training
-            trainer = Trainer(
-                model=[generator, discriminator],
-                loaders=dataloader,
-                criterion=loss,
-                opt=[optimizer_g, optimizer_d],
-                device=device,
-                config=config,
-                callbacks=callbacks,
-                snapshot=current_snapshot_dir,
-                prefix=f'fold_{fold}',
-                mean=mean,
-                std=std
-            )
-            trainer.run(num_epochs=config.training.epochs)
-
-        dur = time() - start_exp
-        print(f'Model {experiment + 1} trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
-
-        if config.inference.calc_inference:
-            save_dir = inference_runner_oof(args, config, splits_metadata, device)
-
-            evaluation_runner(args, config, save_dir)
-
-    dur = time() - start
-    print(f'Models trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
-
diff --git a/scripts/train_gan_collagen.py b/scripts/train_gan_collagen.py
deleted file mode 100644
index c7f77b8..0000000
--- a/scripts/train_gan_collagen.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from torch import optim, cuda
-from time import time
-from copy import deepcopy
-import gc
-from omegaconf import OmegaConf
-import cv2
-from functools import partial
-from torch.utils.tensorboard import SummaryWriter
-
-from collagen.core import Session
-from collagen.strategies import Strategy
-from collagen.callbacks import SamplingFreezer, ScalarMeterLogger, ImageSamplingVisualizer, RunningAverageMeter, \
-    BatchProcFreezer
-from collagen.losses import GeneratorLoss
-
-from bone_enhance.training.session import init_experiment, save_transforms
-from bone_enhance.training import parse_grayscale
-from bone_enhance.splits import build_splits
-from bone_enhance.gan.main import create_data_provider_gan, init_model_gan, DiscriminatorLoss
-from bone_enhance.inference.pipeline_components import inference_runner_oof, evaluation_runner
-
-cv2.ocl.setUseOpenCL(False)
-cv2.setNumThreads(0)
-
-
-if __name__ == "__main__":
-    # Timing
-    start = time()
-
-    # Initialize experiment
-    args_base, config_list, config_paths, device = init_experiment(experiments='../experiments/run_gan')
-
-    for experiment in range(len(config_list)):
-        # Current experiment
-        start_exp = time()
-        args = deepcopy(args_base)  # Copy args so that they can be updated
-        config = OmegaConf.create(config_list[experiment])
-
-        # Update arguments according to the configuration file
-        parser = partial(parse_grayscale, config=config)
-
-        # Split training folds
-        parser_debug = partial(parser, debug=True)  # Display figures
-        splits_metadata = build_splits(args.data_location, args, config, parser_debug,
-                                       args.snapshots_dir, config.training.snapshot)
-        mean, std = splits_metadata['mean'], splits_metadata['std']
-
-        # Save transforms list
-        save_transforms(args.snapshots_dir / config.training.snapshot, config, args, mean, std)
-
-        # Training for separate folds
-        for fold in range(config.training.n_folds):
-            print(f'\nTraining fold {fold}')
-
-            # Initialize model and optimizer
-            model_g, model_d, model_f = init_model_gan(config, device, args.gpus)
-
-            optimizer_d = optim.Adam(model_d.parameters(), lr=config.training.lr, weight_decay=config.training.wd)
-            # loss_d = BCELoss().to(device)
-            loss_d = DiscriminatorLoss(config).to(device)
-
-            optimizer_g = optim.Adam(model_g.parameters(), lr=config.training.lr, weight_decay=config.training.wd)
-            # The generator loss is scored by the discriminator network
-            loss_g = GeneratorLoss(d_network=model_d, d_loss=loss_d).to(device)
-
-            # Initialize data provider
-            item_loaders = dict()
-            data_provider = create_data_provider_gan(model_g, item_loaders, args, config, parser,
-                                                     metadata=splits_metadata[f'fold_{fold}'],
-                                                     mean=mean, std=std, device=device)
-
-            # Setting up the callbacks
-            log_dir = args.snapshots_dir / config.training.snapshot / f"fold_{fold}_log"
-            summary_writer = SummaryWriter(comment='BoneEnhance', log_dir=log_dir, flush_secs=15, max_queue=1)
-            st_callbacks = (SamplingFreezer([model_d, model_g]),
-                            ScalarMeterLogger(writer=summary_writer),
-                            ImageSamplingVisualizer(generator_sampler=item_loaders['fake'],
-                                                    transform=lambda x: (x + 1.0) / 2.0,
-                                                    writer=summary_writer,
-                                                    grid_shape=tuple(config.training.crop_small)))
-
-            # Initialize session
-            sessions = dict()
-            sessions['G'] = Session(data_provider=data_provider,
-                                    train_loader_names=tuple(config.data_sampling.train.data_provider.G.keys()),
-                                    val_loader_names=tuple(config.data_sampling.eval.data_provider.G.keys()),
-                                    module=model_g, loss=loss_g, optimizer=optimizer_g,
-                                    train_callbacks=(BatchProcFreezer(modules=model_d),
-                                                     RunningAverageMeter(prefix="train/G", name="loss")),
-                                    val_callbacks=RunningAverageMeter(prefix="eval/G", name="loss"))
-
-            sessions['D'] = Session(data_provider=data_provider,
-                                    train_loader_names=tuple(config.data_sampling.train.data_provider.D.keys()),
-                                    val_loader_names=None,
-                                    module=model_d, loss=loss_d, optimizer=optimizer_d,
-                                    train_callbacks=(BatchProcFreezer(modules=model_g),
-                                                     RunningAverageMeter(prefix="train/D", name="loss")))
-
-            # Run training
-            strategy = Strategy(data_provider=data_provider,
-                                data_sampling_config=config.data_sampling,
-                                strategy_config=config.strategy,
-                                sessions=sessions,
-                                n_epochs=config.training.epochs,
-                                callbacks=st_callbacks,
-                                device=device)
-
-            strategy.run()
-
-            # Manage memory
-            del strategy
-            cuda.empty_cache()
-            gc.collect()
-
-        dur = time() - start_exp
-        print(f'Model {experiment + 1} trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
-
-        if config.inference.calc_inference:
-            save_dir = inference_runner_oof(args, config, splits_metadata, device)
-
-            evaluation_runner(args, config, save_dir)
-
-    dur = time() - start
-    print(f'Models trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
diff --git a/scripts/train_segmentation.py b/scripts/train_segmentation.py
deleted file mode 100644
index f3e161c..0000000
--- a/scripts/train_segmentation.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from torch import optim, cuda
-from time import time
-from copy import deepcopy
-import gc
-from omegaconf import OmegaConf
-import cv2
-from functools import partial
-from torch.utils.tensorboard import SummaryWriter
-
-from collagen.core import Session
-from collagen.strategies import Strategy
-from collagen.callbacks import SamplingFreezer, ScalarMeterLogger, ImageSamplingVisualizer, RunningAverageMeter, \
-    BatchProcFreezer
-
-
-from bone_enhance.training.session import create_data_provider, init_experiment, init_callbacks, \
-    save_transforms, init_loss, init_model
-from bone_enhance.training import parse_segmentation
-from bone_enhance.splits import build_splits
-from bone_enhance.inference.pipeline_components import inference_runner_oof, evaluation_runner
-
-cv2.ocl.setUseOpenCL(False)
-cv2.setNumThreads(0)
-
-
-if __name__ == "__main__":
-    # Timing
-    start = time()
-
-    # Initialize experiment
-    args_base, config_list, config_paths, device = init_experiment(experiments='../experiments/run_segmentation/')
-    args_base.segmentation = True
-
-    for experiment in range(len(config_list)):
-        # Current experiment
-        start_exp = time()
-        args = deepcopy(args_base)  # Copy args so that they can be updated
-        config = OmegaConf.create(config_list[experiment])
-
-        # Update arguments according to the configuration file
-        if len(config.training.crop_small) == 3:
-            raise NotImplementedError('3D segmentation is not supported!')
-        else:
-            parser = partial(parse_segmentation, config=config)
-
-        # Split training folds
-        parser_debug = partial(parser, debug=True)  # Display figures
-        splits_metadata = build_splits(args.data_location, args, config, parser_debug,
-                                       args.snapshots_dir, config.training.snapshot)
-        mean, std = splits_metadata['mean'], splits_metadata['std']
-
-        # Loss
-        loss_criterion = init_loss(config.training.loss, config, device=device, mean=mean, std=std, args=args)
-
-        # Save transforms list
-        save_transforms(args.snapshots_dir / config.training.snapshot, config, args, mean, std)
-
-        # Training for separate folds
-        for fold in range(config.training.n_folds):
-            print(f'\nTraining fold {fold}')
-            # Initialize data provider
-            data_provider = create_data_provider(args, config, parser, metadata=splits_metadata[f'fold_{fold}'],
-                                                 mean=mean, std=std)
-
-            # Initialize model
-            model = init_model(config, device, args.gpus, args=args)
-
-            # Optimizer
-            optimizer = optim.Adam(model.parameters(),
-                                   lr=config.training.lr,
-                                   weight_decay=config.training.wd)
-            # Callbacks
-            train_cbs, val_cbs = init_callbacks(fold, config, args.snapshots_dir,
-                                                config.training.snapshot, model, optimizer, mean=mean, std=std)
-
-            # Initialize session
-            sessions = dict()
-            sessions['SR'] = Session(data_provider=data_provider,
-                                     train_loader_names=tuple(config.data_sampling.train.data_provider.SR.keys()),
-                                     val_loader_names=tuple(config.data_sampling.eval.data_provider.SR.keys()),
-                                     module=model, loss=loss_criterion, optimizer=optimizer,
-                                     train_callbacks=train_cbs,
-                                     val_callbacks=val_cbs)
-
-            # Run training
-            strategy = Strategy(data_provider=data_provider,
-                                data_sampling_config=config.data_sampling,
-                                strategy_config=config.strategy,
-                                sessions=sessions,
-                                n_epochs=config.training.epochs,
-                                device=device)
-            strategy.run()
-
-            # Manage memory
-            del strategy
-            del model
-            cuda.empty_cache()
-            gc.collect()
-
-        dur = time() - start_exp
-        print(f'Model {experiment + 1} trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
-
-        if config.inference.calc_inference:
-            save_dir = inference_runner_oof(args, config, splits_metadata, device)
-
-            evaluation_runner(args, config, save_dir)
-
-    dur = time() - start
-    print(f'Models trained in {dur // 3600} hours, {(dur % 3600) // 60} minutes, {dur % 60} seconds.')
diff --git a/scripts/train_single_cluster.sh b/scripts/train_single_cluster.sh
deleted file mode 100755
index 4f8d3d4..0000000
--- a/scripts/train_single_cluster.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-# Slurm variables
-#SBATCH --job-name=BoneEnhance_train
-#SBATCH --account=project_2002147
-#SBATCH --mail-type=END  # Send email when job is finished
-#SBATCH --partition=gpu
-#SBATCH --time=8:00:00
-#SBATCH --ntasks=1
-#SBATCH --mem=64G
-#SBATCH --cpus-per-task=16
-#SBATCH --gres=gpu:v100:1
-
-# Set up environment
-# export PROJAPPL=/projappl/project_2002147
-export SCRATCH=/scratch/project_2002147/rytkysan
-# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/projappl/project_2002147/miniconda3/lib
-# csc-workspaces set project_2002147
-
-# module load gcc/8.3.0 cuda/10.1.168
-# module load pytorch/1.7
-
-# Conda environment
-. ${SCRATCH}/miniconda3/etc/profile.d/conda.sh
-
-# pip install hydra-core --user
-# pip install -e ../../../solt/. --user
-# pip install -e ../../../Collagen/. --user
-# pip install -e .. --user
-# pip install opencv-python --user
-
-# Paths
-# ENV_FILE=/scratch/project_2002147/rytkysan/BoneEnhance/BoneEnhance/environment.yml
-
-
-# . ${PROJAPPL}/miniconda3/etc/profile.d/conda.sh
-conda activate bone-enhance-env
-# conda env create -f ${ENV_FILE} --prefix ${SCRATCH}/envs/bone-enhance-env
-# . ./create_env.sh
-
-echo "Start the job..."
-declare -i SEED=42  # Random seed
-
-for VALUE in {1..11}
-do
-    sbatch ./exp_csc_train.sh $VALUE ${SEED}
-done
-
-echo "Done the job!"
\ No newline at end of file
diff --git a/scripts/visualize_stacks.py b/scripts/visualize_stacks.py
deleted file mode 100644
index b58bef0..0000000
--- a/scripts/visualize_stacks.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import os
-import h5py
-from pathlib import Path
-from bone_enhance.utilities import print_orthogonal
-from bone_enhance.training.session import init_experiment
-
-if __name__ == "__main__":
-    # Initialize experiment
-    args, _, _, _ = init_experiment()
-    base_path = args.data_location
-    images_loc = base_path / 'target_3d'
-
-    # List files
-    samples = os.listdir(images_loc)
-    samples.sort()
-    for sample in samples:
-        im_path = images_loc / Path(sample)
-
-        with h5py.File(str(im_path), 'r') as f:
-            data_xy = f['data'][:]
-
-        print_orthogonal(data_xy, title=sample[:-3], res=3.2, scale_factor=1000)