add autodl
AutoDL-Projects/exps/basic/KD-main.py (new file, 289 lines)
@@ -0,0 +1,289 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
#####################################################
import sys, time, torch, random, argparse
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy
from pathlib import Path

from xautodl.config_utils import load_config, obtain_cls_kd_args as obtain_args
from xautodl.procedures import (
    prepare_seed,
    prepare_logger,
    save_checkpoint,
    copy_checkpoint,
)
from xautodl.procedures import get_optim_scheduler, get_procedures
from xautodl.datasets import get_datasets
from xautodl.models import obtain_model, load_net_from_checkpoint
from xautodl.utils import get_model_infos
from xautodl.log_utils import AverageMeter, time_string, convert_secs2time


def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)

    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    # get configures
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(
        args.optim_config,
        {
            "class_num": class_num,
            "KD_alpha": args.KD_alpha,
            "KD_temperature": args.KD_temperature,
        },
        logger,
    )

    # load checkpoint
    teacher_base = load_net_from_checkpoint(args.KD_checkpoint)
    teacher = torch.nn.DataParallel(teacher_base).cuda()

    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log("Student ====>>>>:\n{:}".format(base_model))
    logger.log("Teacher ====>>>>:\n{:}".format(teacher_base))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log(
        "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log("-" * 50)
    logger.log("train_data : {:}".format(train_data))
    logger.log("valid_data : {:}".format(valid_data))
    optimizer, scheduler, criterion = get_optim_scheduler(
        base_model.parameters(), optim_config
    )
    logger.log("optimizer  : {:}".format(optimizer))
    logger.log("scheduler  : {:}".format(scheduler))
    logger.log("criterion  : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"] + 1
        checkpoint = torch.load(last_info["last_checkpoint"])
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(
            args.resume
        )
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(
                args.resume, start_epoch
            )
        )
    elif args.init_model is not None:
        assert Path(
            args.init_model
        ).exists(), "Can not find the initialization file : {:}".format(args.init_model)
        checkpoint = torch.load(args.init_model)
        base_model.load_state_dict(checkpoint["base-model"])
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}
        logger.log("=> initialize the model from {:}".format(args.init_model))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler
            )
        )

        # train for one epoch
        train_loss, train_acc1, train_acc5 = train_func(
            train_loader,
            teacher,
            network,
            criterion,
            scheduler,
            optimizer,
            optim_config,
            epoch_str,
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(), epoch_str, train_loss, train_acc1, train_acc5
            )
        )

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                valid_loader,
                teacher,
                network,
                criterion,
                optim_config,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(),
                    epoch_str,
                    valid_loss,
                    valid_acc1,
                    valid_acc5,
                    valid_accuracies["best"],
                    100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch,
                        valid_acc1,
                        valid_acc5,
                        100 - valid_acc1,
                        100 - valid_acc5,
                        model_best_path,
                    )
                )
            num_bytes = (
                torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
            )
            logger.log(
                "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
                    next(network.parameters()).device,
                    int(num_bytes),
                    num_bytes / 1e3,
                    num_bytes / 1e6,
                    num_bytes / 1e9,
                )
            )
            max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "max_bytes": deepcopy(max_bytes),
                "FLOP": flop,
                "PARAM": param,
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "base-model": base_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log(
        "||| Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log(
        "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True),
            max(v for k, v in max_bytes.items()) / 1e6,
            logger.path("info"),
        )
    )
    logger.log("-" * 200 + "\n")
    logger.close()


if __name__ == "__main__":
    args = obtain_args()
    main(args)
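Note: the distillation objective itself lives in xautodl.procedures and is selected via --procedure; KD-main.py only forwards the teacher network and the KD_alpha / KD_temperature arguments to it. As a minimal sketch (an illustrative assumption, not code from this commit), the usual Hinton-style loss those two arguments parameterize looks like this:

# Illustrative sketch only; kd_loss, alpha, and temperature are hypothetical names,
# not part of xautodl.procedures.
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, targets, alpha, temperature):
    # soft-target term: KL divergence between temperature-softened distributions,
    # rescaled by T^2 so its gradients stay comparable to the hard-target term
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=1),
        F.softmax(teacher_logits / temperature, dim=1),
        reduction="batchmean",
    ) * (temperature * temperature)
    # hard-target term: ordinary cross-entropy against the ground-truth labels
    hard = F.cross_entropy(student_logits, targets)
    return alpha * soft + (1.0 - alpha) * hard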
							
								
								
									
AutoDL-Projects/exps/basic/basic-eval.py (new file, 115 lines)
@@ -0,0 +1,115 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
#####################################################
import os, sys, time, torch, random, argparse
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy

from xautodl.config_utils import load_config, dict2config
from xautodl.procedures import get_procedures, get_optim_scheduler
from xautodl.datasets import get_datasets
from xautodl.models import obtain_model
from xautodl.utils import get_model_infos
from xautodl.log_utils import PrintLogger, time_string


def main(args):

    assert os.path.isdir(args.data_path), "invalid data-path : {:}".format(
        args.data_path
    )
    assert os.path.isfile(args.checkpoint), "invalid checkpoint : {:}".format(
        args.checkpoint
    )

    checkpoint = torch.load(args.checkpoint)
    xargs = checkpoint["args"]
    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, args.data_path, xargs.cutout_length
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=xargs.batch_size,
        shuffle=False,
        num_workers=xargs.workers,
        pin_memory=True,
    )

    logger = PrintLogger()
    model_config = dict2config(checkpoint["model-config"], logger)
    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log("model ====>>>>:\n{:}".format(base_model))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log(
        "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log("-" * 50)
    logger.log("valid_data : {:}".format(valid_data))
    optim_config = dict2config(checkpoint["optim-config"], logger)
    _, _, criterion = get_optim_scheduler(base_model.parameters(), optim_config)
    logger.log("criterion  : {:}".format(criterion))
    base_model.load_state_dict(checkpoint["base-model"])
    _, valid_func = get_procedures(xargs.procedure)
    logger.log("initialize the CNN done, evaluate it using {:}".format(valid_func))
    network = torch.nn.DataParallel(base_model).cuda()

    try:
        valid_loss, valid_acc1, valid_acc5 = valid_func(
            valid_loader,
            network,
            criterion,
            optim_config,
            "pure-evaluation",
            xargs.print_freq_eval,
            logger,
        )
    except Exception:
        # fall back to the plain evaluation procedure when the saved procedure's
        # interface does not match (e.g., it expects extra inputs such as a teacher)
        _, valid_func = get_procedures("basic")
        valid_loss, valid_acc1, valid_acc5 = valid_func(
            valid_loader,
            network,
            criterion,
            optim_config,
            "pure-evaluation",
            xargs.print_freq_eval,
            logger,
        )

    num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
    logger.log(
        "***{:s}*** EVALUATION loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f}, error@1 = {:.2f}, error@5 = {:.2f}".format(
            time_string(),
            valid_loss,
            valid_acc1,
            valid_acc5,
            100 - valid_acc1,
            100 - valid_acc5,
        )
    )
    logger.log(
        "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
            next(network.parameters()).device,
            int(num_bytes),
            num_bytes / 1e3,
            num_bytes / 1e6,
            num_bytes / 1e9,
        )
    )
    logger.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser("Evaluate-CNN")
    parser.add_argument("--data_path", type=str, help="Path to dataset.")
    parser.add_argument(
        "--checkpoint", type=str, help="The path of the checkpoint to evaluate."
    )
    args = parser.parse_args()
    assert torch.cuda.is_available(), "torch.cuda is not available"
    main(args)
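Compatibility note: torch.cuda.max_memory_cached (used above) is deprecated in newer PyTorch releases in favor of torch.cuda.max_memory_reserved. A guarded lookup such as the sketch below (assuming either attribute may be present, depending on the installed PyTorch) keeps the memory report working across versions without changing the original call on older installations:

# Sketch: prefer max_memory_reserved when it exists, otherwise fall back.
peak_mem_fn = getattr(torch.cuda, "max_memory_reserved", None)
if peak_mem_fn is None:  # older PyTorch without max_memory_reserved
    peak_mem_fn = torch.cuda.max_memory_cached
num_bytes = peak_mem_fn(next(network.parameters()).device) * 1.0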
							
								
								
									
AutoDL-Projects/exps/basic/basic-main.py (new file, 291 lines)
@@ -0,0 +1,291 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
#####################################################
import sys, time, torch, random, argparse
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy
from pathlib import Path

from xautodl.datasets import get_datasets
from xautodl.config_utils import load_config, obtain_basic_args as obtain_args
from xautodl.procedures import (
    prepare_seed,
    prepare_logger,
    save_checkpoint,
    copy_checkpoint,
)
from xautodl.procedures import get_optim_scheduler, get_procedures
from xautodl.models import obtain_model
from xautodl.nas_infer_model import obtain_nas_infer_model
from xautodl.utils import get_model_infos
from xautodl.log_utils import AverageMeter, time_string, convert_secs2time


def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)

    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    # get configures
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(args.optim_config, {"class_num": class_num}, logger)

    if args.model_source == "normal":
        base_model = obtain_model(model_config)
    elif args.model_source == "nas":
        base_model = obtain_nas_infer_model(model_config, args.extra_model_path)
    elif args.model_source == "autodl-searched":
        base_model = obtain_model(model_config, args.extra_model_path)
    else:
        raise ValueError("invalid model-source : {:}".format(args.model_source))
    flop, param = get_model_infos(base_model, xshape)
    logger.log("model ====>>>>:\n{:}".format(base_model))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log(
        "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log("-" * 50)
    logger.log("train_data : {:}".format(train_data))
    logger.log("valid_data : {:}".format(valid_data))
    optimizer, scheduler, criterion = get_optim_scheduler(
        base_model.parameters(), optim_config
    )
    logger.log("optimizer  : {:}".format(optimizer))
    logger.log("scheduler  : {:}".format(scheduler))
    logger.log("criterion  : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_infox = torch.load(last_info)
        start_epoch = last_infox["epoch"] + 1
        last_checkpoint_path = last_infox["last_checkpoint"]
        if not last_checkpoint_path.exists():
            logger.log(
                "Does not find {:}, try another path".format(last_checkpoint_path)
            )
            last_checkpoint_path = (
                last_info.parent
                / last_checkpoint_path.parent.name
                / last_checkpoint_path.name
            )
        checkpoint = torch.load(last_checkpoint_path)
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(
            args.resume
        )
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(
                args.resume, start_epoch
            )
        )
    elif args.init_model is not None:
        assert Path(
            args.init_model
        ).exists(), "Can not find the initialization file : {:}".format(args.init_model)
        checkpoint = torch.load(args.init_model)
        base_model.load_state_dict(checkpoint["base-model"])
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}
        logger.log("=> initialize the model from {:}".format(args.init_model))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False
        # set up the drop-path probability, ramped linearly over training
        if hasattr(base_model, "update_drop_path"):
            base_model.update_drop_path(
                model_config.drop_path_prob * epoch / total_epoch
            )
        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler
            )
        )

        # train for one epoch
        train_loss, train_acc1, train_acc5 = train_func(
            train_loader,
            network,
            criterion,
            scheduler,
            optimizer,
            optim_config,
            epoch_str,
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(), epoch_str, train_loss, train_acc1, train_acc5
            )
        )

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                valid_loader,
                network,
                criterion,
                optim_config,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(),
                    epoch_str,
                    valid_loss,
                    valid_acc1,
                    valid_acc5,
                    valid_accuracies["best"],
                    100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch,
                        valid_acc1,
                        valid_acc5,
                        100 - valid_acc1,
                        100 - valid_acc5,
                        model_best_path,
                    )
                )
            num_bytes = (
                torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
            )
            logger.log(
                "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
                    next(network.parameters()).device,
                    int(num_bytes),
                    num_bytes / 1e3,
                    num_bytes / 1e6,
                    num_bytes / 1e9,
                )
            )
            max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "max_bytes": deepcopy(max_bytes),
                "FLOP": flop,
                "PARAM": param,
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "base-model": base_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log(
        "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True),
            max(v for k, v in max_bytes.items()) / 1e6,
            logger.path("info"),
        )
    )
    logger.log("-" * 200 + "\n")
    logger.close()


if __name__ == "__main__":
    args = obtain_args()
    main(args)
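The per-epoch checkpoint written by save_checkpoint above bundles the weights, optimizer, scheduler, and bookkeeping needed to resume. A minimal sketch of inspecting one offline; the file name is hypothetical and depends on --save_dir and on what logger.path("model") resolves to:

# Hypothetical checkpoint path for illustration; substitute the real logger.path("model") output.
import torch

ckpt = torch.load("outputs/basic/seed-1/checkpoint/model-base.pth", map_location="cpu")
print(ckpt["epoch"], ckpt["FLOP"], ckpt["PARAM"])   # last finished epoch and model cost
print(ckpt["valid_accuracies"]["best"])             # best top-1 validation accuracy so far
state_dict = ckpt["base-model"]                     # plain state_dict, saved without the DataParallel wrapper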
							
								
								
									
AutoDL-Projects/exps/basic/xmain.py (new file, 157 lines)
@@ -0,0 +1,157 @@
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
#####################################################
# python exps/basic/xmain.py --save_dir outputs/x   #
#####################################################
import os, sys, time, torch, random, argparse
from copy import deepcopy
from pathlib import Path

lib_dir = (Path(__file__).parent / ".." / "..").resolve()
print("LIB-DIR: {:}".format(lib_dir))
if str(lib_dir) not in sys.path:
    sys.path.insert(0, str(lib_dir))

from xautodl import xmisc


def main(args):

    train_data = xmisc.nested_call_by_yaml(args.train_data_config, args.data_path)
    valid_data = xmisc.nested_call_by_yaml(args.valid_data_config, args.data_path)
    logger = xmisc.Logger(args.save_dir, prefix="seed-{:}-".format(args.rand_seed))

    logger.log("Create the logger: {:}".format(logger))
    logger.log("Arguments : -------------------------------")
    for name, value in args._get_kwargs():
        logger.log("{:16} : {:}".format(name, value))
    logger.log("Python  Version  : {:}".format(sys.version.replace("\n", " ")))
    logger.log("PyTorch Version  : {:}".format(torch.__version__))
    logger.log("cuDNN   Version  : {:}".format(torch.backends.cudnn.version()))
    logger.log("CUDA available   : {:}".format(torch.cuda.is_available()))
    logger.log("CUDA GPU numbers : {:}".format(torch.cuda.device_count()))
    logger.log(
        "CUDA_VISIBLE_DEVICES : {:}".format(
            os.environ["CUDA_VISIBLE_DEVICES"]
            if "CUDA_VISIBLE_DEVICES" in os.environ
            else "None"
        )
    )
    logger.log("The training data is:\n{:}".format(train_data))
    logger.log("The validation data is:\n{:}".format(valid_data))

    model = xmisc.nested_call_by_yaml(args.model_config)
    logger.log("The model is:\n{:}".format(model))
    logger.log("The model size is {:.4f} M".format(xmisc.count_parameters(model)))

    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_sampler=xmisc.BatchSampler(train_data, args.batch_size, args.steps),
        num_workers=args.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=False,
    )
    iters_per_epoch = len(train_data) // args.batch_size

    logger.log("The training loader: {:}".format(train_loader))
    logger.log("The validation loader: {:}".format(valid_loader))
    optimizer = xmisc.nested_call_by_yaml(
        args.optim_config,
        model.parameters(),
        lr=args.lr,
        weight_decay=args.weight_decay,
    )
    objective = xmisc.nested_call_by_yaml(args.loss_config)
    metric = xmisc.nested_call_by_yaml(args.metric_config)

    logger.log("The optimizer is:\n{:}".format(optimizer))
    logger.log("The objective is {:}".format(objective))
    logger.log("The metric is {:}".format(metric))
    logger.log(
        "The iters_per_epoch = {:}, estimated epochs = {:}".format(
            iters_per_epoch, args.steps // iters_per_epoch
        )
    )

    model, objective = torch.nn.DataParallel(model).cuda(), objective.cuda()
    scheduler = xmisc.LRMultiplier(
        optimizer, xmisc.get_scheduler(args.scheduler, args.lr), args.steps
    )

    start_time, iter_time = time.time(), xmisc.AverageMeter()
    for xiter, data in enumerate(train_loader):
        need_time = "Time Left: {:}".format(
            xmisc.time_utils.convert_secs2time(
                iter_time.avg * (len(train_loader) - xiter), True
            )
        )
        iter_str = "{:6d}/{:06d}".format(xiter, len(train_loader))

        inputs, targets = data
        targets = targets.cuda(non_blocking=True)
        model.train()

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = objective(outputs, targets)

        loss.backward()
        optimizer.step()
        scheduler.step()

        if xiter % iters_per_epoch == 0:
            logger.log("TRAIN [{:}] loss = {:.6f}".format(iter_str, loss.item()))

        # measure elapsed time
        iter_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("-" * 200 + "\n")
    logger.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Train a classification model with a loss function.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--save_dir", type=str, help="Folder to save checkpoints and log."
    )
    parser.add_argument("--resume", type=str, help="Resume path.")
    parser.add_argument("--init_model", type=str, help="The initialization model path.")
    parser.add_argument("--model_config", type=str, help="The path to the model config")
    parser.add_argument("--optim_config", type=str, help="The optimizer config file.")
    parser.add_argument("--loss_config", type=str, help="The loss config file.")
    parser.add_argument("--metric_config", type=str, help="The metric config file.")
    parser.add_argument(
        "--train_data_config", type=str, help="The training dataset config path."
    )
    parser.add_argument(
        "--valid_data_config", type=str, help="The validation dataset config path."
    )
    parser.add_argument("--data_path", type=str, help="The path to the dataset.")
    # Optimization options
    parser.add_argument("--lr", type=float, help="The learning rate")
    parser.add_argument("--weight_decay", type=float, help="The weight decay")
    parser.add_argument("--scheduler", type=str, help="The scheduler indicator.")
    parser.add_argument("--steps", type=int, help="The total number of steps.")
    parser.add_argument("--batch_size", type=int, default=256, help="The batch size.")
    parser.add_argument("--workers", type=int, default=4, help="The number of workers")
    # Random Seed
    parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")

    args = parser.parse_args()
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    if args.save_dir is None:
        raise ValueError("The save-path argument can not be None")

    main(args)
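Since this script is step-driven rather than epoch-driven, the epoch estimate logged above is simply args.steps // iters_per_epoch. A worked example with assumed CIFAR-10-like numbers (50,000 training images and the default batch size of 256; both values are assumptions for illustration):

# Assumed numbers, for illustration only.
train_size, batch_size, steps = 50_000, 256, 58_500
iters_per_epoch = train_size // batch_size    # 195, mirroring len(train_data) // args.batch_size
estimated_epochs = steps // iters_per_epoch   # 300
print(iters_per_epoch, estimated_epochs)      # -> 195 300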