From c375b93315565a9f2a7e22d959b359b339761b89 Mon Sep 17 00:00:00 2001
From: Jakub
Date: Mon, 9 May 2022 11:54:08 +0200
Subject: [PATCH 01/15] Added support for config

---
 tools/train.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tools/train.py b/tools/train.py
index b69066fa6..51a6baca4 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -41,6 +41,12 @@ def make_parser():
         type=str,
         help="plz input your experiment description file",
     )
+    parser.add_argument(
+        "--config_filepath",
+        default=None,
+        type=str,
+        help="Filepath to config file",
+    )
     parser.add_argument(
         "--resume", default=False, action="store_true", help="resume training"
     )

From c03d5bab9333771c6ba798530a2b66fdec69c442 Mon Sep 17 00:00:00 2001
From: Jakub
Date: Mon, 9 May 2022 12:26:05 +0200
Subject: [PATCH 02/15] Added neptune integration

---
 tools/train.py        |  6 ++++++
 yolox/core/trainer.py |  2 +-
 yolox/exp/base_exp.py | 14 +++++++++++++-
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index 51a6baca4..9195fb15e 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -7,6 +7,7 @@
 import warnings

 from loguru import logger
+import yaml

 import torch
 import torch.backends.cudnn as cudnn
@@ -131,6 +132,11 @@ def main(exp, args):
     if not args.experiment_name:
         args.experiment_name = exp.exp_name

+    if args.config_filepath is not None:
+        with open(args.config_filepath, "r") as f:
+            config = yaml.safe_load(f)
+        exp.add_params_from_config(config, use_neptune=True)
+
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index a9ee2a681..b0b0afeb3 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -38,7 +38,7 @@ def __init__(self, exp, args):
         # before_train methods.
         self.exp = exp
         self.args = args
-
+        self.neptune = self.exp.neptune
         # training related attr
         self.max_epoch = exp.max_epoch
         self.amp_training = args.fp16

diff --git a/yolox/exp/base_exp.py b/yolox/exp/base_exp.py
index e26ae079c..127c5de07 100644
--- a/yolox/exp/base_exp.py
+++ b/yolox/exp/base_exp.py
@@ -8,6 +8,7 @@
 from typing import Dict
 from tabulate import tabulate
+import neptune.new as neptune

 import torch
 from torch.nn import Module
@@ -22,7 +23,10 @@ def __init__(self):
         self.output_dir = "./YOLOX_outputs"
         self.print_interval = 100
         self.eval_interval = 10
-
+        self.neptune = neptune.init(
+            project="jakub.pingielski/b-yond",
+            api_token="eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI2NTlkYzZmZC1kZTY5LTQ2NjMtODFkZC04YmY4NTNmYTkwMTIifQ==",
+        )
     @abstractmethod
     def get_model(self) -> Module:
         pass
@@ -73,3 +77,11 @@ def merge(self, cfg_list):
                     except Exception:
                         v = ast.literal_eval(v)
                 setattr(self, k, v)
+
+    def add_params_from_config(self, config: dict, use_neptune: bool = True):
+        for key, value in config.items():
+            setattr(self, key, value)
+            if use_neptune and self.neptune:
+                self.neptune[f"config/{key}"].log(value)
+
+
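A minimal sketch, not part of the series, of the kind of YAML file `--config_filepath` is expected to point at; the keys are illustrative, since `add_params_from_config` simply does `setattr(exp, key, value)` for every key and mirrors it to the Neptune run under `config/<key>`:

```python
# Illustrative only: a hypothetical config consumed via --config_filepath.
# Every key becomes an attribute on the Exp object via setattr(exp, key, value).
import yaml

config_text = """
max_epoch: 50
data_num_workers: 2
basic_lr_per_img: 0.00015625
"""

config = yaml.safe_load(config_text)   # -> {'max_epoch': 50, 'data_num_workers': 2, ...}
print(config)

# In tools/train.py the dict is then handed over:
#   exp.add_params_from_config(config, use_neptune=True)
# which also logs each value to the Neptune run as config/<key>.
```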
From c56359c040db0de33ce0ad4ec9d8f0f114c6b257 Mon Sep 17 00:00:00 2001
From: Jakub
Date: Tue, 10 May 2022 11:30:41 +0200
Subject: [PATCH 03/15] artifact logging

---
 tools/train.py            | 2 +-
 yolox/core/trainer.py     | 2 ++
 yolox/exp/yolox_base.py   | 2 +-
 yolox/utils/checkpoint.py | 7 ++++++-
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index 9195fb15e..abdb55f08 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -136,7 +136,7 @@ def main(exp, args):
         with open(args.config_filepath, "r") as f:
             config = yaml.safe_load(f)
         exp.add_params_from_config(config, use_neptune=True)
-
+        exp.neptune.log_artifact(args.config_filepath)
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index b0b0afeb3..1b79c53fb 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -364,3 +364,5 @@ def save_ckpt(self, ckpt_name, update_best_ckpt=False):
             if self.args.logger == "wandb":
                 self.wandb_logger.save_checkpoint(self.file_name, ckpt_name, update_best_ckpt)
+            if self.neptune:
+                self.neptune.log_artefact()

diff --git a/yolox/exp/yolox_base.py b/yolox/exp/yolox_base.py
index 611b25825..5d0496f49 100644
--- a/yolox/exp/yolox_base.py
+++ b/yolox/exp/yolox_base.py
@@ -94,7 +94,7 @@ def __init__(self):
         self.eval_interval = 10
         # save history checkpoint or not.
         # If set to False, yolox will only save latest and best ckpt.
-        self.save_history_ckpt = True
+        self.save_history_ckpt = False
         # name of experiment
         self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]

diff --git a/yolox/utils/checkpoint.py b/yolox/utils/checkpoint.py
index a0c200e41..2d6fa4226 100644
--- a/yolox/utils/checkpoint.py
+++ b/yolox/utils/checkpoint.py
@@ -4,6 +4,7 @@
 import os
 import shutil
 from loguru import logger
+import neptune.new as neptune

 import torch

@@ -33,7 +34,7 @@ def load_ckpt(model, ckpt):
     return model


-def save_checkpoint(state, is_best, save_dir, model_name=""):
+def save_checkpoint(state, is_best, save_dir, model_name, neptune):
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
     filename = os.path.join(save_dir, model_name + "_ckpt.pth")
@@ -41,3 +42,7 @@ def save_checkpoint(state, is_best, save_dir, model_name=""):
     if is_best:
         best_filename = os.path.join(save_dir, "best_ckpt.pth")
         shutil.copyfile(filename, best_filename)
+        if neptune:
+            neptune.log_artifact(best_filename)
+
+

From a0f589c505fa8e1164e42b442ecf5e1a35ecfe9a Mon Sep 17 00:00:00 2001
From: Jakub
Date: Tue, 10 May 2022 11:43:44 +0200
Subject: [PATCH 04/15] artifact logging

---
 tools/train.py            | 1 +
 yolox/core/trainer.py     | 4 ++--
 yolox/utils/checkpoint.py | 2 ++
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index abdb55f08..9a833d03a 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -137,6 +137,7 @@ def main(exp, args):
             config = yaml.safe_load(f)
         exp.add_params_from_config(config, use_neptune=True)
         exp.neptune.log_artifact(args.config_filepath)
+        exp.neptune['config'].track_files(args.config_filepath)
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index 1b79c53fb..3416a85fe 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -360,9 +360,9 @@ def save_ckpt(self, ckpt_name, update_best_ckpt=False):
                 update_best_ckpt,
                 self.file_name,
                 ckpt_name,
+                self.neptune,
             )
             if self.args.logger == "wandb":
                 self.wandb_logger.save_checkpoint(self.file_name, ckpt_name, update_best_ckpt)
-            if self.neptune:
-                self.neptune.log_artefact()
+
+

diff --git a/yolox/utils/checkpoint.py b/yolox/utils/checkpoint.py
index 2d6fa4226..74995d038 100644
--- a/yolox/utils/checkpoint.py
+++ b/yolox/utils/checkpoint.py
@@ -44,5 +44,7 @@ def save_checkpoint(state, is_best, save_dir, model_name, neptune):
         shutil.copyfile(filename, best_filename)
         if neptune:
             neptune.log_artifact(best_filename)
+            neptune['best_checkpoint'].track_files(best_filename)
+

From dbc43f89a80a200e7b1377b7dc58c78115efa39c Mon Sep 17 00:00:00 2001
From: Jakub
Date: Tue, 10 May 2022 11:53:13 +0200
Subject: [PATCH 05/15] artifact logging

---
 tools/train.py            | 2 +-
 yolox/utils/checkpoint.py | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index 9a833d03a..46eeff52f 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -136,7 +136,7 @@ def main(exp, args):
         with open(args.config_filepath, "r") as f:
             config = yaml.safe_load(f)
         exp.add_params_from_config(config, use_neptune=True)
-        exp.neptune.log_artifact(args.config_filepath)
+        print("saving config from", args.config_filepath)
         exp.neptune['config'].track_files(args.config_filepath)
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/utils/checkpoint.py b/yolox/utils/checkpoint.py
index 74995d038..5595844bf 100644
--- a/yolox/utils/checkpoint.py
+++ b/yolox/utils/checkpoint.py
@@ -42,9 +42,8 @@ def save_checkpoint(state, is_best, save_dir, model_name, neptune):
     if is_best:
         best_filename = os.path.join(save_dir, "best_ckpt.pth")
         shutil.copyfile(filename, best_filename)
-        if neptune:
-            neptune.log_artifact(best_filename)
-            neptune['best_checkpoint'].track_files(best_filename)
+        print("saving best checkpoint to ", best_filename)
+        neptune['best_checkpoint'].track_files(best_filename)

From 3fe9cd281160e99d388cc2faa6e926e28680a73c Mon Sep 17 00:00:00 2001
From: Jakub
Date: Tue, 10 May 2022 13:43:22 +0200
Subject: [PATCH 06/15] refactor

---
 tools/train.py            | 1 -
 yolox/utils/checkpoint.py | 1 -
 2 files changed, 2 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index 46eeff52f..a7d40028d 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -136,7 +136,6 @@ def main(exp, args):
         with open(args.config_filepath, "r") as f:
             config = yaml.safe_load(f)
         exp.add_params_from_config(config, use_neptune=True)
-        print("saving config from", args.config_filepath)
         exp.neptune['config'].track_files(args.config_filepath)
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/utils/checkpoint.py b/yolox/utils/checkpoint.py
index 5595844bf..00ae8c5f3 100644
--- a/yolox/utils/checkpoint.py
+++ b/yolox/utils/checkpoint.py
@@ -42,7 +42,6 @@ def save_checkpoint(state, is_best, save_dir, model_name, neptune):
     if is_best:
         best_filename = os.path.join(save_dir, "best_ckpt.pth")
         shutil.copyfile(filename, best_filename)
-        print("saving best checkpoint to ", best_filename)
         neptune['best_checkpoint'].track_files(best_filename)

From 586d3ede0c2de483bbd9838a2ef45080272187bb Mon Sep 17 00:00:00 2001
From: Jakub
Date: Tue, 10 May 2022 14:06:50 +0200
Subject: [PATCH 07/15] refactor

---
 yolox/utils/checkpoint.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/yolox/utils/checkpoint.py b/yolox/utils/checkpoint.py
index 00ae8c5f3..50da303a6 100644
--- a/yolox/utils/checkpoint.py
+++ b/yolox/utils/checkpoint.py
@@ -34,7 +34,7 @@ def load_ckpt(model, ckpt):
     return model


-def save_checkpoint(state, is_best, save_dir, model_name, neptune):
+def save_checkpoint(state, is_best, save_dir, model_name="", neptune=None):
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
     filename = os.path.join(save_dir, model_name + "_ckpt.pth")
@@ -42,7 +42,5 @@ def save_checkpoint(state, is_best, save_dir, model_name, neptune):
     if is_best:
         best_filename = os.path.join(save_dir, "best_ckpt.pth")
         shutil.copyfile(filename, best_filename)
-        neptune['best_checkpoint'].track_files(best_filename)
-
-
+        if neptune:
+            neptune['best_checkpoint'].track_files(best_filename)
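After these clean-ups `save_checkpoint` keeps its upstream defaults and only touches Neptune when a run is passed in. A usage sketch with placeholder values; `run` stands for whatever `neptune.init()` returned:

```python
# Usage sketch with a placeholder model and paths; requires the patched yolox package.
import torch
from torch import nn
from yolox.utils.checkpoint import save_checkpoint

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
state = {
    "start_epoch": 1,
    "model": model.state_dict(),
    "optimizer": optimizer.state_dict(),
}

# Without a run the helper behaves like upstream YOLOX; nothing is tracked.
save_checkpoint(state, is_best=True, save_dir="./YOLOX_outputs/demo")

# With a run, best_ckpt.pth is additionally registered:
#   save_checkpoint(state, is_best=True, save_dir="./YOLOX_outputs/demo", neptune=run)
# which ends up calling run['best_checkpoint'].track_files(best_filename).
```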
From da1f121df589092323de3d73f24bf229d25291ac Mon Sep 17 00:00:00 2001
From: Jakub
Date: Thu, 12 May 2022 11:52:24 +0200
Subject: [PATCH 08/15] add more metrics

---
 tools/train.py        | 2 +-
 yolox/core/trainer.py | 6 ++++--
 yolox/exp/base_exp.py | 8 ++++++--
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index a7d40028d..fb2b35717 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -136,7 +136,7 @@ def main(exp, args):
         with open(args.config_filepath, "r") as f:
             config = yaml.safe_load(f)
         exp.add_params_from_config(config, use_neptune=True)
-        exp.neptune['config'].track_files(args.config_filepath)
+        exp.neptune['config_file'].track_files(args.config_filepath)
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index 3416a85fe..6048101ee 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -114,6 +114,7 @@ def train_one_iter(self):
             self.ema_model.update(self.model)

         lr = self.lr_scheduler.update_lr(self.progress_in_iter + 1)
+        self.neptune['lr'].log(lr)
         for param_group in self.optimizer.param_groups:
             param_group["lr"] = lr

@@ -243,7 +244,8 @@ def after_iter(self):
             loss_str = ", ".join(
                 ["{}: {:.1f}".format(k, v.latest) for k, v in loss_meter.items()]
             )
-
+            for loss_name, loss_value in loss_meter.items():
+                self.neptune[loss_name].log(loss_value.latest())
             time_meter = self.meter.get_filtered_meter("time")
             time_str = ", ".join(
                 ["{}: {:.3f}s".format(k, v.avg) for k, v in time_meter.items()]
             )
@@ -327,7 +329,7 @@ def evaluate_and_save_model(self):

         update_best_ckpt = ap50_95 > self.best_ap
         self.best_ap = max(self.best_ap, ap50_95)
-
+        self.neptune['best_ap'].log(self.best_ap)
         if self.rank == 0:
             if self.args.logger == "tensorboard":
                 self.tblogger.add_scalar("val/COCOAP50", ap50, self.epoch + 1)

diff --git a/yolox/exp/base_exp.py b/yolox/exp/base_exp.py
index 127c5de07..e76400b17 100644
--- a/yolox/exp/base_exp.py
+++ b/yolox/exp/base_exp.py
@@ -13,7 +13,7 @@
 from torch.nn import Module

 from yolox.utils import LRScheduler
-
+from paths import DATASETS_PATH

 class BaseExp(metaclass=ABCMeta):
     """Basic class for any experiment."""
@@ -80,7 +80,11 @@ def merge(self, cfg_list):

     def add_params_from_config(self, config: dict, use_neptune: bool = True):
         for key, value in config.items():
-            setattr(self, key, value)
+            if key == "dataset_version":
+                value = DATASETS_PATH / key
+                setattr("dataset_dir", value)
+            else:
+                setattr(self, key, value)
             if use_neptune and self.neptune:
                 self.neptune[f"config/{key}"].log(value)

From 9415f617129caf6863a03d1ebd77e679a24c112d Mon Sep 17 00:00:00 2001
From: Jakub
Date: Thu, 12 May 2022 12:20:42 +0200
Subject: [PATCH 09/15] bug fix

---
 yolox/core/trainer.py | 6 +++---
 yolox/exp/base_exp.py | 3 +--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index 6048101ee..94b18024e 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -114,7 +114,7 @@ def train_one_iter(self):
             self.ema_model.update(self.model)

         lr = self.lr_scheduler.update_lr(self.progress_in_iter + 1)
-        self.neptune['lr'].log(lr)
+        self.neptune['config/lr'].log(lr)
         for param_group in self.optimizer.param_groups:
             param_group["lr"] = lr

@@ -245,7 +245,7 @@ def after_iter(self):
                 ["{}: {:.1f}".format(k, v.latest) for k, v in loss_meter.items()]
             )
             for loss_name, loss_value in loss_meter.items():
-                self.neptune[loss_name].log(loss_value.latest())
+                self.neptune[f"loss/{loss_name}"].log(loss_value.latest)
             time_meter = self.meter.get_filtered_meter("time")
             time_str = ", ".join(
                 ["{}: {:.3f}s".format(k, v.avg) for k, v in time_meter.items()]
             )
@@ -329,7 +329,7 @@ def evaluate_and_save_model(self):

         update_best_ckpt = ap50_95 > self.best_ap
         self.best_ap = max(self.best_ap, ap50_95)
-        self.neptune['best_ap'].log(self.best_ap)
+        self.neptune['metrics/best_ap'].log(self.best_ap)
         if self.rank == 0:
             if self.args.logger == "tensorboard":
                 self.tblogger.add_scalar("val/COCOAP50", ap50, self.epoch + 1)

diff --git a/yolox/exp/base_exp.py b/yolox/exp/base_exp.py
index e76400b17..479cd3414 100644
--- a/yolox/exp/base_exp.py
+++ b/yolox/exp/base_exp.py
@@ -81,8 +81,7 @@ def merge(self, cfg_list):
     def add_params_from_config(self, config: dict, use_neptune: bool = True):
         for key, value in config.items():
             if key == "dataset_version":
-                value = DATASETS_PATH / key
-                setattr("dataset_dir", value)
+                setattr(self, "dataset_dir", DATASETS_PATH / value)
             else:
                 setattr(self, key, value)
             if use_neptune and self.neptune:
From 9e7c5b84179715bcc838c5fc530d115cc5957077 Mon Sep 17 00:00:00 2001
From: Dawid Stachowiak
Date: Thu, 19 May 2022 16:01:14 +0200
Subject: [PATCH 10/15] fixes in training and neptune logging

---
 tools/train.py            |  7 ++++++-
 yolox/exp/base_exp.py     | 17 +++++++++++++----
 yolox/utils/checkpoint.py |  2 +-
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/tools/train.py b/tools/train.py
index fb2b35717..cfbf19954 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -127,6 +127,11 @@ def main(exp, args):
 if __name__ == "__main__":
     args = make_parser().parse_args()
     exp = get_exp(args.exp_file, args.name)
+
+    #TODO: Add neptune logging with multidevice training. Logging now works only
+    # on 1 gpu device training, not working with multiprocessing.
+    exp.set_neptune_logging(True)
+
     exp.merge(args.opts)

     if not args.experiment_name:
@@ -136,7 +141,7 @@ def main(exp, args):
         with open(args.config_filepath, "r") as f:
             config = yaml.safe_load(f)
         exp.add_params_from_config(config, use_neptune=True)
-        exp.neptune['config_file'].track_files(args.config_filepath)
+        exp.neptune['config_file'].upload(args.config_filepath)
     num_gpu = get_num_devices() if args.devices is None else args.devices
     assert num_gpu <= get_num_devices()

diff --git a/yolox/exp/base_exp.py b/yolox/exp/base_exp.py
index 479cd3414..1aabd0d51 100644
--- a/yolox/exp/base_exp.py
+++ b/yolox/exp/base_exp.py
@@ -23,10 +23,8 @@ def __init__(self):
         self.output_dir = "./YOLOX_outputs"
         self.print_interval = 100
         self.eval_interval = 10
-        self.neptune = neptune.init(
-            project="jakub.pingielski/b-yond",
-            api_token="eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI2NTlkYzZmZC1kZTY5LTQ2NjMtODFkZC04YmY4NTNmYTkwMTIifQ==",
-        )
+        self.neptune = None
+
     @abstractmethod
     def get_model(self) -> Module:
         pass
@@ -64,6 +62,17 @@ def __repr__(self):
         ]
         return tabulate(exp_table, headers=table_header, tablefmt="fancy_grid")

+    def set_neptune_logging(self, state):
+        if state:
+            self.neptune = neptune.init(
+                project="jakub.pingielski/b-yond",
+                api_token="eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI2NTlkYzZmZC1kZTY5LTQ2NjMtODFkZC04YmY4NTNmYTkwMTIifQ==",
+            )
+        else:
+            if self.neptune is not None:
+                self.neptune.stop()
+            self.neptune = None
+
     def merge(self, cfg_list):
         assert len(cfg_list) % 2 == 0
         for k, v in zip(cfg_list[0::2], cfg_list[1::2]):

diff --git a/yolox/utils/checkpoint.py b/yolox/utils/checkpoint.py
index 50da303a6..e7d732bef 100644
--- a/yolox/utils/checkpoint.py
+++ b/yolox/utils/checkpoint.py
@@ -43,4 +43,4 @@ def save_checkpoint(state, is_best, save_dir, model_name="", neptune=None):
         best_filename = os.path.join(save_dir, "best_ckpt.pth")
         shutil.copyfile(filename, best_filename)
         if neptune:
-            neptune['best_checkpoint'].track_files(best_filename)
+            neptune['best_checkpoint'].upload(best_filename)
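The run is still created with a project name and API token written directly into `set_neptune_logging`. As a hedged alternative, not what the patch does, the Neptune client can read the standard `NEPTUNE_API_TOKEN` and `NEPTUNE_PROJECT` environment variables when those arguments are omitted, which keeps credentials out of the source tree:

```python
# Alternative sketch only; the patch passes project/api_token explicitly instead.
# Assumes valid NEPTUNE_API_TOKEN and NEPTUNE_PROJECT are exported before running.
import neptune.new as neptune

run = neptune.init()                  # falls back to the environment variables
run["config/source"] = "environment"
run.stop()
```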
From d26a6ca5600528e1eaad05781e6789cee56d4c81 Mon Sep 17 00:00:00 2001
From: Aditya-Bobade
Date: Thu, 19 May 2022 21:38:01 +0530
Subject: [PATCH 11/15] validation loss logging

---
 yolox/core/trainer.py   | 38 ++++++++++++++++++++++++++++
 yolox/exp/yolox_base.py | 56 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index 94b18024e..f01c85aac 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -152,10 +152,18 @@ def before_train(self):
             no_aug=self.no_aug,
             cache_img=self.args.cache,
         )
+        self.val_loader = self.exp.get_val_loader(
+            batch_size=self.args.batch_size,
+            is_distributed=self.is_distributed,
+            no_aug=False,
+            cache_img=self.args.cache,
+        )

         logger.info("init prefetcher, this might take one minute or less...")
         self.prefetcher = DataPrefetcher(self.train_loader)
+        self.val_prefetcher = DataPrefetcher(self.val_loader)
         # max_iter means iters per epoch
         self.max_iter = len(self.train_loader)
+        self.max_val_iter = len(self.val_loader)

         self.lr_scheduler = self.exp.get_lr_scheduler(
             self.exp.basic_lr_per_img * self.args.batch_size, self.max_iter
@@ -315,6 +323,9 @@ def resume_train(self, model):
         return model

     def evaluate_and_save_model(self):
+        # calculate loss
+        self.calculate_eval_loss()
+
         if self.use_model_ema:
             evalmodel = self.ema_model.ema
         else:
@@ -368,3 +379,30 @@ def save_ckpt(self, ckpt_name, update_best_ckpt=False):
             if self.args.logger == "wandb":
                 self.wandb_logger.save_checkpoint(self.file_name, ckpt_name, update_best_ckpt)

+    def calculate_eval_loss(self):
+        for iter in range(self.max_val_iter):
+            inps, targets = self.val_prefetcher.next()
+            inps = inps.to(self.data_type)
+            targets = targets.to(self.data_type)
+            targets.requires_grad = False
+            inps, targets = self.exp.preprocess(inps, targets, self.input_size)
+
+            with torch.cuda.amp.autocast(enabled=self.amp_training):
+                outputs = self.model(inps, targets)
+
+            loss = {
+                "total_loss": outputs["total_loss"],
+                "iou_loss": outputs["iou_loss"],
+                "l1_loss": outputs["l1_loss"],
+                "conf_loss": outputs["conf_loss"],
+                "cls_loss": outputs["cls_loss"]
+            }
+            progress_str = "epoch: {}/{}, iter: {}/{},".format(
+                self.epoch + 1, self.max_epoch, iter + 1, self.max_val_iter
+            )
+
+            for loss_name, loss_value in loss.items():
+                progress_str += " {}: {:.1f}".format(loss_name, loss_value)
+                self.neptune[f"loss/val/{loss_name}"].log(loss_value)
+
+            logger.info("Validation:{}".format(progress_str))

diff --git a/yolox/exp/yolox_base.py b/yolox/exp/yolox_base.py
index 5d0496f49..097a9c5e3 100644
--- a/yolox/exp/yolox_base.py
+++ b/yolox/exp/yolox_base.py
@@ -201,6 +201,62 @@ def get_data_loader(

         return train_loader

+    def get_val_loader(
+        self, batch_size, is_distributed, no_aug=False, cache_img=False, testdev=False
+    ):
+        from yolox.data import (
+            COCODataset,
+            TrainTransform,
+            YoloBatchSampler,
+            DataLoader,
+            InfiniteSampler,
+            MosaicDetection,
+            worker_init_reset_seed,
+        )
+        from yolox.utils import (
+            wait_for_the_master,
+            get_local_rank,
+        )
+
+        local_rank = get_local_rank()
+
+        with wait_for_the_master(local_rank):
+            dataset = COCODataset(
+                data_dir=self.data_dir,
+                json_file=self.val_ann if not testdev else self.test_ann,
+                img_size=self.input_size,
+                preproc=TrainTransform(
+                    max_labels=50,
+                    flip_prob=0.0,
+                    hsv_prob=0.0),
+                cache=cache_img,
+            )
+
+        self.dataset = dataset
+
+        if is_distributed:
+            batch_size = batch_size // dist.get_world_size()
+
+        sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)
+
+        batch_sampler = YoloBatchSampler(
+            sampler=sampler,
+            batch_size=batch_size,
+            drop_last=False,
+            mosaic=not no_aug,
+        )
+
+        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
+        dataloader_kwargs["batch_sampler"] = batch_sampler
+
+        # Make sure each process has different random seed, especially for 'fork' method.
+        # Check https://github.com/pytorch/pytorch/issues/63311 for more details.
+        dataloader_kwargs["worker_init_fn"] = worker_init_reset_seed
+
+        val_loader = DataLoader(self.dataset, **dataloader_kwargs)
+
+        return val_loader
+
     def random_resize(self, data_loader, epoch, rank, is_distributed):
         tensor = torch.LongTensor(2).cuda()

From e53d2bc5343490fc4d1e76a3421972fe10ee1c44 Mon Sep 17 00:00:00 2001
From: Aditya-Bobade
Date: Fri, 20 May 2022 14:28:16 +0530
Subject: [PATCH 12/15] flag for validation loss logging

---
 yolox/core/trainer.py   | 25 ++++++++++++++++---------
 yolox/exp/yolox_base.py |  2 ++
 2 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py
index f01c85aac..3dedda515 100644
--- a/yolox/core/trainer.py
+++ b/yolox/core/trainer.py
@@ -55,6 +55,9 @@ def __init__(self, exp, args):
         self.input_size = exp.input_size
         self.best_ap = 0

+        # validation loss
+        self.calc_validation_loss = exp.calc_val_loss
+
         # metric record
         self.meter = MeterBuffer(window_size=exp.print_interval)
         self.file_name = os.path.join(exp.output_dir, args.experiment_name)
@@ -152,18 +155,21 @@ def before_train(self):
             no_aug=self.no_aug,
             cache_img=self.args.cache,
         )
-        self.val_loader = self.exp.get_val_loader(
-            batch_size=self.args.batch_size,
-            is_distributed=self.is_distributed,
-            no_aug=False,
-            cache_img=self.args.cache,
-        )
+        if self.calc_validation_loss:
+            self.val_loader = self.exp.get_val_loader(
+                batch_size=self.args.batch_size,
+                is_distributed=self.is_distributed,
+                no_aug=False,
+                cache_img=self.args.cache,
+            )

         logger.info("init prefetcher, this might take one minute or less...")
         self.prefetcher = DataPrefetcher(self.train_loader)
-        self.val_prefetcher = DataPrefetcher(self.val_loader)
+        if self.calc_validation_loss:
+            self.val_prefetcher = DataPrefetcher(self.val_loader)
         # max_iter means iters per epoch
         self.max_iter = len(self.train_loader)
-        self.max_val_iter = len(self.val_loader)
+        if self.calc_validation_loss:
+            self.max_val_iter = len(self.val_loader)

         self.lr_scheduler = self.exp.get_lr_scheduler(
             self.exp.basic_lr_per_img * self.args.batch_size, self.max_iter
@@ -324,7 +330,8 @@ def resume_train(self, model):

     def evaluate_and_save_model(self):
         # calculate loss
-        self.calculate_eval_loss()
+        if self.calc_validation_loss:
+            self.calculate_eval_loss()

         if self.use_model_ema:
             evalmodel = self.ema_model.ema

diff --git a/yolox/exp/yolox_base.py b/yolox/exp/yolox_base.py
index 097a9c5e3..51d9337c4 100644
--- a/yolox/exp/yolox_base.py
+++ b/yolox/exp/yolox_base.py
@@ -81,6 +81,8 @@ def __init__(self):
         self.no_aug_epochs = 15
         # apply EMA during training
         self.ema = True
+        # calculate validation loss
+        self.calc_val_loss = False

         # weight decay of optimizer
         self.weight_decay = 5e-4
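With this flag the extra validation pass stays off unless the experiment, or the YAML config passed via `--config_filepath`, turns it on. An illustrative sketch of how the flag travels from the config file to the trainer:

```python
# Illustrative: enabling the optional validation-loss pass through the config file.
import yaml

config = yaml.safe_load("calc_val_loss: true\n")
assert config == {"calc_val_loss": True}
# exp.add_params_from_config(config)  ->  exp.calc_val_loss = True
# Trainer.__init__ then copies it:        self.calc_validation_loss = exp.calc_val_loss
```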
"iou_loss": [], + "l1_loss": [], + "conf_loss": [], + "cls_loss": [] + } for iter in range(self.max_val_iter): inps, targets = self.val_prefetcher.next() inps = inps.to(self.data_type) @@ -397,19 +405,18 @@ def calculate_eval_loss(self): with torch.cuda.amp.autocast(enabled=self.amp_training): outputs = self.model(inps, targets) - loss = { - "total_loss": outputs["total_loss"], - "iou_loss": outputs["iou_loss"], - "l1_loss": outputs["l1_loss"], - "conf_loss": outputs["conf_loss"], - "cls_loss": outputs["cls_loss"] - } - progress_str = "epoch: {}/{}, iter: {}/{},".format( - self.epoch + 1, self.max_epoch, iter + 1, self.max_val_iter - ) + loss["total_loss"].append(outputs["total_loss"]) + loss["iou_loss"].append(outputs["iou_loss"]) + loss["l1_loss"].append(outputs["l1_loss"]) + loss["conf_loss"].append(outputs["conf_loss"]) + loss["cls_loss"].append(outputs["cls_loss"]) + + progress_str = "epoch: {}/{},".format( + self.epoch + 1, self.max_epoch + ) - for loss_name, loss_value in loss.items(): - progress_str += " {}: {:.1f}".format(loss_name, loss_value) - self.neptune[f"loss/val/{loss_name}"].log(loss_value) + for loss_name, loss_value in loss.items(): + progress_str += " {}: {:.1f}".format(loss_name, np.nanmean(loss_value)) + self.neptune[f"loss/val/{loss_name}"].log(np.nanmean(loss_value)) - logger.info("Validation:{}".format(progress_str)) + logger.info("Validation:{}".format(progress_str)) From 47eccabd586bfea33d893a50e07a24fb08bcc38e Mon Sep 17 00:00:00 2001 From: Aditya-Bobade Date: Fri, 20 May 2022 19:58:48 +0530 Subject: [PATCH 14/15] remove average validation loss logging --- yolox/core/trainer.py | 39 ++++++++++++++------------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/yolox/core/trainer.py b/yolox/core/trainer.py index 4d400e66f..215e47c5d 100644 --- a/yolox/core/trainer.py +++ b/yolox/core/trainer.py @@ -5,7 +5,6 @@ import datetime import os import time -import numpy as np from loguru import logger import torch @@ -388,35 +387,25 @@ def save_ckpt(self, ckpt_name, update_best_ckpt=False): self.wandb_logger.save_checkpoint(self.file_name, ckpt_name, update_best_ckpt) def calculate_eval_loss(self): - loss = { - "total_loss": [], - "iou_loss": [], - "l1_loss": [], - "conf_loss": [], - "cls_loss": [] - } for iter in range(self.max_val_iter): inps, targets = self.val_prefetcher.next() inps = inps.to(self.data_type) targets = targets.to(self.data_type) targets.requires_grad = False inps, targets = self.exp.preprocess(inps, targets, self.input_size) - with torch.cuda.amp.autocast(enabled=self.amp_training): outputs = self.model(inps, targets) - - loss["total_loss"].append(outputs["total_loss"]) - loss["iou_loss"].append(outputs["iou_loss"]) - loss["l1_loss"].append(outputs["l1_loss"]) - loss["conf_loss"].append(outputs["conf_loss"]) - loss["cls_loss"].append(outputs["cls_loss"]) - - progress_str = "epoch: {}/{},".format( - self.epoch + 1, self.max_epoch - ) - - for loss_name, loss_value in loss.items(): - progress_str += " {}: {:.1f}".format(loss_name, np.nanmean(loss_value)) - self.neptune[f"loss/val/{loss_name}"].log(np.nanmean(loss_value)) - - logger.info("Validation:{}".format(progress_str)) + loss = { + "total_loss": outputs["total_loss"], + "iou_loss": outputs["iou_loss"], + "l1_loss": outputs["l1_loss"], + "conf_loss": outputs["conf_loss"], + "cls_loss": outputs["cls_loss"] + } + progress_str = "epoch: {}/{}, iter: {}/{},".format( + self.epoch + 1, self.max_epoch, iter + 1, self.max_val_iter + ) + for loss_name, loss_value in loss.items(): + 
progress_str += " {}: {:.1f},".format(loss_name, loss_value) + self.neptune[f"loss/val/{loss_name}"].log(loss_value) + logger.info("Validation:{}".format(progress_str)) From 1c31df47418a12f29ec18fb3783380200ae16ef8 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 24 May 2022 11:10:55 +0000 Subject: [PATCH 15/15] mosaic_prob !=1 bug fix --- yolox/data/datasets/mosaicdetection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/yolox/data/datasets/mosaicdetection.py b/yolox/data/datasets/mosaicdetection.py index 708babed5..25a0b0625 100644 --- a/yolox/data/datasets/mosaicdetection.py +++ b/yolox/data/datasets/mosaicdetection.py @@ -6,6 +6,7 @@ import cv2 import numpy as np +import torch from yolox.utils import adjust_box_anns, get_local_rank @@ -151,6 +152,7 @@ def __getitem__(self, idx): # img_info and img_id are not used for training. # They are also hard to be specified on a mosaic image. # ----------------------------------------------------------------- + img_id = torch.tensor(np.array(img_id), dtype=torch.long) return mix_img, padded_labels, img_info, img_id else: