diff --git a/exps/algos-v2/search-cell.py b/exps/algos-v2/search-cell.py
index 976bc0a..a5c60dc 100644
--- a/exps/algos-v2/search-cell.py
+++ b/exps/algos-v2/search-cell.py
@@ -338,8 +338,7 @@ def main(xargs):
   else:
     extra_info = {'class_num': class_num, 'xshape': xshape, 'epochs': xargs.overwite_epochs}
   config = load_config(xargs.config_path, extra_info, logger)
-  search_loader, train_loader, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', \
-                                                        (config.batch_size, config.test_batch_size), xargs.workers)
+  search_loader, train_loader, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', (config.batch_size, config.test_batch_size), xargs.workers)
   logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
   logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
diff --git a/exps/algos-v2/search-size.py b/exps/algos-v2/search-size.py
new file mode 100644
index 0000000..1eebd4e
--- /dev/null
+++ b/exps/algos-v2/search-size.py
@@ -0,0 +1,334 @@
+##################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #
+######################################################################################
+# python ./exps/algos-v2/search-size.py --dataset cifar10 --data_path $TORCH_HOME/cifar.python --algo tas --rand_seed 777
+# python ./exps/algos-v2/search-size.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo tas --rand_seed 777
+# python ./exps/algos-v2/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo tas --rand_seed 777
+####
+# python ./exps/algos-v2/search-size.py --dataset cifar10 --data_path $TORCH_HOME/cifar.python --algo fbv2 --rand_seed 777
+# python ./exps/algos-v2/search-size.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo fbv2 --rand_seed 777
+# python ./exps/algos-v2/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo fbv2 --rand_seed 777
+######################################################################################
+import os, sys, time, random, argparse
+import numpy as np
+from copy import deepcopy
+import torch
+import torch.nn as nn
+from pathlib import Path
+lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
+if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
+from config_utils import load_config, dict2config, configure2str
+from datasets import get_datasets, get_nas_search_loaders
+from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
+from utils import count_parameters_in_MB, obtain_accuracy
+from log_utils import AverageMeter, time_string, convert_secs2time
+from models import get_cell_based_tiny_net, get_search_spaces
+from nas_201_api import NASBench301API as API
+
+
+def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
+  data_time, batch_time = AverageMeter(), AverageMeter()
+  base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
+  arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
+  end = time.time()
+  network.train()
+  for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
+    scheduler.update(None, 1.0 * step / len(xloader))
+    base_inputs = base_inputs.cuda(non_blocking=True)
+    arch_inputs = arch_inputs.cuda(non_blocking=True)
+    base_targets = base_targets.cuda(non_blocking=True)
+    arch_targets = arch_targets.cuda(non_blocking=True)
+    # measure data loading time
+    data_time.update(time.time() - end)
+
+    # update the network weights
+    network.zero_grad()
+    _, logits = network(base_inputs)
+    base_loss = criterion(logits, base_targets)
+    base_loss.backward()
+    w_optimizer.step()
+    # record
+    base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
+    base_losses.update(base_loss.item(), base_inputs.size(0))
+    base_top1.update(base_prec1.item(), base_inputs.size(0))
+    base_top5.update(base_prec5.item(), base_inputs.size(0))
+
+    # update the architecture weights
+    network.zero_grad()
+    _, logits = network(arch_inputs)
+    arch_loss = criterion(logits, arch_targets)
+    arch_loss.backward()
+    a_optimizer.step()
+    # record
+    arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+    arch_losses.update(arch_loss.item(), arch_inputs.size(0))
+    arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
+    arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
+
+    # measure elapsed time
+    batch_time.update(time.time() - end)
+    end = time.time()
+
+    if step % print_freq == 0 or step + 1 == len(xloader):
+      Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
+      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
+      Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
+      Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
+      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
+  return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
+
+
+def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger):
+  # config. (containing some necessary arg)
+  # prev_baseline: the baseline score (i.e., average val_acc) from the previous epoch
+  data_time, batch_time = AverageMeter(), AverageMeter()
+  GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()
+
+  controller_num_aggregate = 20
+  controller_train_steps = 50
+  controller_bl_dec = 0.99
+  controller_entropy_weight = 0.0001
+
+  network.eval()
+  network.controller.train()
+  network.controller.zero_grad()
+  loader_iter = iter(xloader)
+  for step in range(controller_train_steps * controller_num_aggregate):
+    try:
+      inputs, targets = next(loader_iter)
+    except StopIteration:
+      loader_iter = iter(xloader)
+      inputs, targets = next(loader_iter)
+    inputs = inputs.cuda(non_blocking=True)
+    targets = targets.cuda(non_blocking=True)
+    # measure data loading time
+    data_time.update(time.time() - xend)
+
+    log_prob, entropy, sampled_arch = network.controller()
+    with torch.no_grad():
+      network.set_cal_mode('dynamic', sampled_arch)
+      _, logits = network(inputs)
+      val_top1, val_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
+      val_top1 = val_top1.view(-1) / 100
+    reward = val_top1 + controller_entropy_weight * entropy
+    if prev_baseline is None:
+      baseline = val_top1
+    else:
+      baseline = prev_baseline - (1 - controller_bl_dec) * (prev_baseline - reward)
+
+    loss = -1 * log_prob * (reward - baseline)
+
+    # account
+    RewardMeter.update(reward.item())
+    BaselineMeter.update(baseline.item())
+    ValAccMeter.update(val_top1.item() * 100)
+    LossMeter.update(loss.item())
+    EntropyMeter.update(entropy.item())
+
+    # average the gradient over controller_num_aggregate samples
+    loss = loss / controller_num_aggregate
+    loss.backward(retain_graph=True)
+
+    # measure elapsed time
+    batch_time.update(time.time() - xend)
+    xend = time.time()
+    if (step + 1) % controller_num_aggregate == 0:
+      grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
+      GradnormMeter.update(grad_norm)
+      optimizer.step()
+      network.controller.zero_grad()
+
+    if step % print_freq == 0:
+      Sstr = '*Train-Controller* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, controller_train_steps * controller_num_aggregate)
+      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
+      Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
+      Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
+      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)
+
+  return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg
+
+
+def valid_func(xloader, network, criterion, logger):
+  data_time, batch_time = AverageMeter(), AverageMeter()
+  arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
+  end = time.time()
+  with torch.no_grad():
+    network.eval()
+    for step, (arch_inputs, arch_targets) in enumerate(xloader):
+      arch_targets = arch_targets.cuda(non_blocking=True)
+      # measure data loading time
+      data_time.update(time.time() - end)
+      # prediction
+      _, logits = network(arch_inputs.cuda(non_blocking=True))
+      arch_loss = criterion(logits, arch_targets)
+      # record
+      arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
+      arch_losses.update(arch_loss.item(), arch_inputs.size(0))
+      arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
+      arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
+      # measure elapsed time
+      batch_time.update(time.time() - end)
+      end = time.time()
+  return arch_losses.avg, arch_top1.avg, arch_top5.avg
+
+
+def main(xargs):
+  assert torch.cuda.is_available(), 'CUDA is not available.'
+  torch.backends.cudnn.enabled = True
+  torch.backends.cudnn.benchmark = False
+  torch.backends.cudnn.deterministic = True
+  torch.set_num_threads(xargs.workers)
+  prepare_seed(xargs.rand_seed)
+  logger = prepare_logger(xargs)
+
+  train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
+  if xargs.overwrite_epochs is None:
+    extra_info = {'class_num': class_num, 'xshape': xshape}
+  else:
+    extra_info = {'class_num': class_num, 'xshape': xshape, 'epochs': xargs.overwrite_epochs}
+  config = load_config(xargs.config_path, extra_info, logger)
+  search_loader, train_loader, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', (config.batch_size, config.test_batch_size), xargs.workers)
+  logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
+  logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
+
+  search_space = get_search_spaces(xargs.search_space, 'nas-bench-301')
+
+  model_config = dict2config(
+      dict(name='generic', super_type='search-shape', candidate_Cs=search_space['candidates'], max_num_Cs=search_space['numbers'], num_classes=class_num,
+           genotype=xargs.genotype, affine=bool(xargs.affine), track_running_stats=bool(xargs.track_running_stats)), None)
+  logger.log('search space : {:}'.format(search_space))
+  logger.log('model config : {:}'.format(model_config))
+  search_model = get_cell_based_tiny_net(model_config)
+  search_model.set_algo(xargs.algo)
+  logger.log('{:}'.format(search_model))
+
+  w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.weights, config)
+  a_optimizer = torch.optim.Adam(search_model.alphas, lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay, eps=xargs.arch_eps)
+  logger.log('w-optimizer : {:}'.format(w_optimizer))
+  logger.log('a-optimizer : {:}'.format(a_optimizer))
+  logger.log('w-scheduler : {:}'.format(w_scheduler))
+  logger.log('criterion   : {:}'.format(criterion))
+  params = count_parameters_in_MB(search_model)
+  logger.log('The parameters of the search model = {:.2f} MB'.format(params))
+  logger.log('search-space : {:}'.format(search_space))
+  try:
+    api = API(verbose=False)
+  except Exception:
+    api = None
+  logger.log('{:} create API = {:} done'.format(time_string(), api))
+
+  last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
+  network, criterion = search_model.cuda(), criterion.cuda()  # use a single GPU
+
+  if last_info.exists():  # automatically resume from the previous checkpoint
+    logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
+    last_info = torch.load(last_info)
+    start_epoch = last_info['epoch']
+    checkpoint = torch.load(last_info['last_checkpoint'])
+    genotypes = checkpoint['genotypes']
+    valid_accuracies = checkpoint['valid_accuracies']
+    search_model.load_state_dict(checkpoint['search_model'])
+    w_scheduler.load_state_dict(checkpoint['w_scheduler'])
+    w_optimizer.load_state_dict(checkpoint['w_optimizer'])
+    a_optimizer.load_state_dict(checkpoint['a_optimizer'])
+    logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
+  else:
+    logger.log("=> did not find the last-info file : {:}".format(last_info))
+    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: network.random}
+
+  # start training
+  start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
+  for epoch in range(start_epoch, total_epoch):
+    w_scheduler.update(epoch, 0.0)
+    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
+    epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
+    logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))
+
+    if xargs.algo == 'fbv2' or xargs.algo == 'tas':
+      network.set_tau(xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1))
+      logger.log('[RESET tau as : {:}]'.format(network.tau))
+    search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 \
+        = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)
+    search_time.update(time.time() - start_time)
+    logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
+    logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))
+
+    genotype = network.genotype
+    logger.log('[{:}] - [get_best_arch] : {:}'.format(epoch_str, genotype))
+    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion, logger)
+    logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
+    valid_accuracies[epoch] = valid_a_top1
+
+    genotypes[epoch] = genotype
+    logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
+    # save checkpoint
+    save_path = save_checkpoint({'epoch': epoch + 1,
+                                 'args': deepcopy(xargs),
+                                 'search_model': search_model.state_dict(),
+                                 'w_optimizer': w_optimizer.state_dict(),
+                                 'a_optimizer': a_optimizer.state_dict(),
+                                 'w_scheduler': w_scheduler.state_dict(),
+                                 'genotypes': genotypes,
+                                 'valid_accuracies': valid_accuracies},
+                                model_base_path, logger)
+    last_info = save_checkpoint({
+        'epoch': epoch + 1,
+        'args': deepcopy(xargs),
+        'last_checkpoint': save_path,
+    }, logger.path('info'), logger)
+    with torch.no_grad():
+      logger.log('{:}'.format(search_model.show_alphas()))
+    if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[epoch], '90')))
+    # measure elapsed time
+    epoch_time.update(time.time() - start_time)
+    start_time = time.time()
+
+  # the final post procedure : count the time
+  start_time = time.time()
+  genotype = network.genotype
+  search_time.update(time.time() - start_time)
+
+  valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion, logger)
+  logger.log('Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))
+
+  logger.log('\n' + '-' * 100)
+  # check the performance from the architecture dataset
+  logger.log('[{:}] run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(xargs.algo, total_epoch, search_time.sum, genotype))
+  if api is not None: logger.log('{:}'.format(api.query_by_arch(genotype, '90')))
+  logger.close()
+
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser("Weight-sharing NAS methods to search for the channel sizes.")
+  parser.add_argument('--data_path',    type=str, help='Path to dataset')
+  parser.add_argument('--dataset',      type=str, choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')
+  parser.add_argument('--search_space', type=str, default='sss', choices=['sss'], help='The search space name.')
+  parser.add_argument('--algo',         type=str, choices=['tas', 'fbv2', 'enas'], help='The search algorithm.')
+  parser.add_argument('--genotype',     type=str, default='|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2|', help='The genotype.')
+  # for the Gumbel-Softmax-based algorithms (tas / fbv2)
+  parser.add_argument('--tau_min', type=float, default=0.1, help='The minimum tau for Gumbel Softmax.')
+  parser.add_argument('--tau_max', type=float, default=10, help='The maximum tau for Gumbel Softmax.')
+  #
+  parser.add_argument('--track_running_stats', type=int, default=0, choices=[0, 1], help='Whether to use track_running_stats in the BN layer.')
+  parser.add_argument('--affine',              type=int, default=0, choices=[0, 1], help='Whether to use affine=True or False in the BN layer.')
+  parser.add_argument('--config_path',         type=str, default='./configs/nas-benchmark/algos/weight-sharing.config', help='The path of the configuration.')
+  parser.add_argument('--overwrite_epochs',    type=int, help='The number of epochs to overwrite that value in config files.')
+  # architecture learning rate
+  parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
+  parser.add_argument('--arch_weight_decay',  type=float, default=1e-3, help='weight decay for arch encoding')
+  parser.add_argument('--arch_eps',           type=float, default=1e-8, help='epsilon of the Adam optimizer for arch encoding')
+  # log
+  parser.add_argument('--workers',    type=int, default=2, help='number of data loading workers (default: 2)')
+  parser.add_argument('--save_dir',   type=str, default='./output/search', help='Folder to save checkpoints and log.')
+  parser.add_argument('--print_freq', type=int, default=200, help='print frequency (default: 200)')
+  parser.add_argument('--rand_seed',  type=int, help='manual seed')
+  args = parser.parse_args()
+  if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)
+  dirname = '{:}-affine{:}_BN{:}'.format(args.algo, args.affine, args.track_running_stats)
+  if args.overwrite_epochs is not None:
+    dirname = dirname + '-E{:}'.format(args.overwrite_epochs)
+  args.save_dir = os.path.join('{:}-{:}'.format(args.save_dir, args.search_space), args.dataset, dirname)
+
+  main(args)
diff --git a/exps/experimental/vis-bench-ws.py b/exps/experimental/vis-bench-ws.py
index bbf7d67..b296913 100644
--- a/exps/experimental/vis-bench-ws.py
+++ b/exps/experimental/vis-bench-ws.py
@@ -33,6 +33,7 @@ def fetch_data(root_dir='./output/search', search_space='tss', dataset=None):
   alg2name['GDAS'] = 'gdas-affine0_BN0-None'
   alg2name['RSPS'] = 'random-affine0_BN0-None'
   alg2name['DARTS (1st)'] = 'darts-v1-affine0_BN0-None'
+  alg2name['ENAS'] = 'enas-affine0_BN0-None'
   """
   alg2name['DARTS (2nd)'] = 'darts-v2-affine1_BN0-None'
   alg2name['SETN'] = 'setn-affine1_BN0-None'
diff --git a/lib/models/__init__.py b/lib/models/__init__.py
index debdecb..9413cc4 100644
--- a/lib/models/__init__.py
+++ b/lib/models/__init__.py
@@ -12,8 +12,8 @@ __all__ = ['change_key', 'get_cell_based_tiny_net', 'get_search_spaces', 'get_ci
 # useful modules
 from config_utils import dict2config
-from .SharedUtils import change_key
-from .cell_searchs import CellStructure, CellArchitectures
+from models.SharedUtils import change_key
+from models.cell_searchs import CellStructure, CellArchitectures
 
 
 # Cell-based NAS Models
@@ -27,6 +27,10 @@ def get_cell_based_tiny_net(config):
       return nas_super_nets[config.name](config.C, config.N, config.max_nodes, config.num_classes, config.space, config.affine, config.track_running_stats)
     except:
       return nas_super_nets[config.name](config.C, config.N, config.max_nodes, config.num_classes, config.space)
+  elif super_type == 'search-shape':
+    from .shape_searchs import GenericNAS301Model
+    genotype = CellStructure.str2structure(config.genotype)
+    return GenericNAS301Model(config.candidate_Cs, config.max_num_Cs, genotype, config.num_classes, config.affine, config.track_running_stats)
   elif super_type == 'nasnet-super':
     from .cell_searchs import nasnet_super_nets as nas_super_nets
     return nas_super_nets[config.name](config.C, config.N, config.steps, config.multiplier, \
diff --git a/lib/models/cell_infers/cells.py b/lib/models/cell_infers/cells.py
index 2dbb925..0e9aae4 100644
--- a/lib/models/cell_infers/cells.py
+++ b/lib/models/cell_infers/cells.py
@@ -5,13 +5,14 @@
 import torch
 import torch.nn as nn
 from copy import deepcopy
-from ..cell_operations import OPS
+
+from models.cell_operations import OPS
 
 
 # Cell for NAS-Bench-201
 class InferCell(nn.Module):
 
-  def __init__(self, genotype, C_in, C_out, stride):
+  def __init__(self, genotype, C_in, C_out, stride, affine=True, track_running_stats=True):
     super(InferCell, self).__init__()
 
     self.layers = nn.ModuleList()
@@ -24,9 +25,9 @@ class InferCell(nn.Module):
       cur_innod = []
       for (op_name, op_in) in node_info:
         if op_in == 0:
-          layer = OPS[op_name](C_in , C_out, stride, True, True)
+          layer = OPS[op_name](C_in , C_out, stride, affine, track_running_stats)
         else:
-          layer = OPS[op_name](C_out, C_out, 1, True, True)
+          layer = OPS[op_name](C_out, C_out, 1, affine, track_running_stats)
         cur_index.append( len(self.layers) )
         cur_innod.append( op_in )
         self.layers.append( layer )
diff --git a/lib/models/cell_operations.py b/lib/models/cell_operations.py
index c7528c1..f80c840 100644
--- a/lib/models/cell_operations.py
+++ b/lib/models/cell_operations.py
@@ -74,17 +74,17 @@ class DualSepConv(nn.Module):
 
 class ResNetBasicblock(nn.Module):
 
-  def __init__(self, inplanes, planes, stride, affine=True):
+  def __init__(self, inplanes, planes, stride, affine=True, track_running_stats=True):
     super(ResNetBasicblock, self).__init__()
     assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
-    self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1, affine)
-    self.conv_b = ReLUConvBN( planes, planes, 3, 1, 1, 1, affine)
+    self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1, affine, track_running_stats)
+    self.conv_b = ReLUConvBN( planes, planes, 3, 1, 1, 1, affine, track_running_stats)
     if stride == 2:
       self.downsample = nn.Sequential(
         nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
         nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
     elif inplanes != planes:
-      self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1, affine)
+      self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1, affine, track_running_stats)
     else:
       self.downsample = None
     self.in_dim = inplanes
diff --git a/lib/models/shape_searchs/__init__.py b/lib/models/shape_searchs/__init__.py
index 554f035..500167d 100644
--- a/lib/models/shape_searchs/__init__.py
+++ b/lib/models/shape_searchs/__init__.py
@@ -6,3 +6,4 @@ from .SearchCifarResNet_depth import SearchDepthCifarResNet
 from .SearchCifarResNet import SearchShapeCifarResNet
 from .SearchSimResNet_width import SearchWidthSimResNet
 from .SearchImagenetResNet import SearchShapeImagenetResNet
+from .generic_size_tiny_cell_model import GenericNAS301Model
diff --git a/lib/models/shape_searchs/generic_size_tiny_cell_model.py b/lib/models/shape_searchs/generic_size_tiny_cell_model.py
new file mode 100644
index 0000000..0996597
--- /dev/null
+++ b/lib/models/shape_searchs/generic_size_tiny_cell_model.py
@@ -0,0 +1,139 @@
+#####################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
+#####################################################
+from typing import List, Text, Any
+import random, torch
+import torch.nn as nn
+
+from models.cell_operations import ResNetBasicblock
+from models.cell_infers.cells import InferCell
+from models.shape_searchs.SoftSelect import select2withP, ChannelWiseInter
+
+
+class GenericNAS301Model(nn.Module):
+
+  def __init__(self, candidate_Cs: List[int], max_num_Cs: int, genotype: Any, num_classes: int, affine: bool, track_running_stats: bool):
+    super(GenericNAS301Model, self).__init__()
+    self._max_num_Cs = max_num_Cs
+    self._candidate_Cs = candidate_Cs
+    if max_num_Cs % 3 != 2:
+      raise ValueError('invalid number of layers : {:}'.format(max_num_Cs))
+    self._num_stage = N = max_num_Cs // 3
+    self._max_C = max(candidate_Cs)
+
+    stem = nn.Sequential(
+        nn.Conv2d(3, self._max_C, kernel_size=3, padding=1, bias=not affine),
+        nn.BatchNorm2d(self._max_C, affine=affine, track_running_stats=track_running_stats))
+
+    layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
+
+    c_prev = self._max_C
+    self._cells = nn.ModuleList()
+    self._cells.append(stem)
+    for index, reduction in enumerate(layer_reductions):
+      if reduction: cell = ResNetBasicblock(c_prev, self._max_C, 2, True)
+      else        : cell = InferCell(genotype, c_prev, self._max_C, 1, affine, track_running_stats)
+      self._cells.append(cell)
+      c_prev = cell.out_dim
+    self._num_layer = len(self._cells)
+
+    self.lastact = nn.Sequential(nn.BatchNorm2d(c_prev, affine=affine, track_running_stats=track_running_stats), nn.ReLU(inplace=True))
+    self.global_pooling = nn.AdaptiveAvgPool2d(1)
+    self.classifier = nn.Linear(c_prev, num_classes)
+    # algorithm-related attributes
+    self.register_buffer('_tau', torch.zeros(1))
+    self._algo = None
+
+  def set_algo(self, algo: Text):
+    # used for searching
+    assert self._algo is None, 'This function can only be called once.'
+    assert algo in ['fbv2', 'enas', 'tas'], 'invalid algo : {:}'.format(algo)
+    self._algo = algo
+    self._arch_parameters = nn.Parameter(1e-3 * torch.randn(self._max_num_Cs, len(self._candidate_Cs)))
+    if algo == 'fbv2' or algo == 'enas':
+      # mask i keeps the first candidate_Cs[i] channels and zeroes the rest
+      self.register_buffer('_masks', torch.zeros(len(self._candidate_Cs), max(self._candidate_Cs)))
+      for i in range(len(self._candidate_Cs)):
+        self._masks.data[i, :self._candidate_Cs[i]] = 1
+
+  @property
+  def tau(self):
+    return self._tau
+
+  def set_tau(self, tau):
+    self._tau.data[:] = tau
+
+  @property
+  def weights(self):
+    xlist = list(self._cells.parameters())
+    xlist += list(self.lastact.parameters())
+    xlist += list(self.global_pooling.parameters())
+    xlist += list(self.classifier.parameters())
+    return xlist
+
+  @property
+  def alphas(self):
+    return [self._arch_parameters]
+
+  def show_alphas(self):
+    with torch.no_grad():
+      return 'arch-parameters :\n{:}'.format(nn.functional.softmax(self._arch_parameters, dim=-1).cpu())
+
+  @property
+  def random(self):
+    cs = []
+    for i in range(self._max_num_Cs):
+      index = random.randint(0, len(self._candidate_Cs) - 1)
+      cs.append(str(self._candidate_Cs[index]))
+    return ':'.join(cs)
+
+  @property
+  def genotype(self):
+    cs = []
+    for i in range(self._max_num_Cs):
+      with torch.no_grad():
+        index = self._arch_parameters[i].argmax().item()
+        cs.append(str(self._candidate_Cs[index]))
+    return ':'.join(cs)
+
+  def get_message(self) -> Text:
+    string = self.extra_repr()
+    for i, cell in enumerate(self._cells):
+      string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self._cells), cell.extra_repr())
+    return string
+
+  def extra_repr(self):
+    return ('{name}(candidates={_candidate_Cs}, num={_max_num_Cs}, N={_num_stage}, L={_num_layer})'.format(name=self.__class__.__name__, **self.__dict__))
+
+  def forward(self, inputs):
+    feature = inputs
+    for i, cell in enumerate(self._cells):
+      feature = cell(feature)
+      if self._algo == 'fbv2':
+        # FBNetV2-style: soft channel mask via Gumbel-Softmax over the candidates;
+        # cell 0 is the stem, so the arch-parameter index is offset by one
+        idx = max(0, i - 1)
+        weights = nn.functional.gumbel_softmax(self._arch_parameters[idx:idx+1], tau=self.tau, dim=-1)
+        mask = torch.matmul(weights, self._masks).view(1, -1, 1, 1)
+        feature = feature * mask
+      elif self._algo == 'tas':
+        # TAS-style: sample two widths, resize both to the larger one via
+        # channel-wise interpolation, and mix them by their probabilities
+        idx = max(0, i - 1)
+        selected_cs, selected_probs = select2withP(self._arch_parameters[idx:idx+1], self.tau, num=2)
+        with torch.no_grad():
+          i1, i2 = selected_cs.cpu().view(-1).tolist()
+        c1, c2 = self._candidate_Cs[i1], self._candidate_Cs[i2]
+        out_channel = max(c1, c2)
+        out1 = ChannelWiseInter(feature[:, :c1], out_channel)
+        out2 = ChannelWiseInter(feature[:, :c2], out_channel)
+        out = out1 * selected_probs[0, 0] + out2 * selected_probs[0, 1]
+        if feature.shape[1] == out.shape[1]:
+          feature = out
+        else:
+          # pad with zeros so the next layer always sees the super-net width
+          miss = torch.zeros(feature.shape[0], feature.shape[1] - out.shape[1], feature.shape[2], feature.shape[3], device=feature.device)
+          feature = torch.cat((out, miss), dim=1)
+      else:
+        raise ValueError('invalid algorithm : {:}'.format(self._algo))
+
+    out = self.lastact(feature)
+    out = self.global_pooling(out)
+    out = out.view(out.size(0), -1)
+    logits = self.classifier(out)
+
+    return out, logits
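
----------------------------------------------------------------------
Reviewer note (not part of the commit): the two width-search mechanics in
GenericNAS301Model.forward are easy to check in isolation. Below is a minimal,
self-contained sketch. `channel_wise_inter` is a hypothetical stand-in for the
repo's models.shape_searchs.SoftSelect.ChannelWiseInter, and the softmax/topk
pair stands in for select2withP; the candidate widths are illustrative.

import torch
import torch.nn.functional as F

def channel_wise_inter(x, out_c):
  # Resize the channel axis of a [B, C, H, W] tensor to out_c channels by
  # treating the channels as a 1-D signal and linearly interpolating it.
  b, c, h, w = x.shape
  y = x.permute(0, 2, 3, 1).reshape(b * h * w, 1, c)
  y = F.interpolate(y, size=out_c, mode='linear', align_corners=True)
  return y.reshape(b, h, w, out_c).permute(0, 3, 1, 2)

torch.manual_seed(0)
candidate_Cs = [8, 16, 24, 32]                        # illustrative candidate widths
feature = torch.randn(2, max(candidate_Cs), 4, 4)     # a super-net feature map
alpha = torch.randn(1, len(candidate_Cs), requires_grad=True)  # one layer's arch params
tau = 5.0

# fbv2-style: a soft binary mask over channels, differentiable w.r.t. alpha.
masks = torch.zeros(len(candidate_Cs), max(candidate_Cs))
for i, c in enumerate(candidate_Cs):
  masks[i, :c] = 1.0                                  # mask i keeps the first c channels
weights = F.gumbel_softmax(alpha, tau=tau, dim=-1)    # [1, num_candidates]
mask = (weights @ masks).view(1, -1, 1, 1)            # expected mask over channels
print('fbv2 masked feature:', (feature * mask).shape)

# tas-style: keep the two most likely widths, resize both to the larger one,
# and mix them by their renormalized probabilities.
probs = F.softmax(alpha / tau, dim=-1)
top_probs, top_idx = probs.topk(2, dim=-1)
top_probs = top_probs / top_probs.sum()
i1, i2 = top_idx[0, 0].item(), top_idx[0, 1].item()
c1, c2 = candidate_Cs[i1], candidate_Cs[i2]
out_channel = max(c1, c2)
out1 = channel_wise_inter(feature[:, :c1], out_channel)
out2 = channel_wise_inter(feature[:, :c2], out_channel)
out = out1 * top_probs[0, 0] + out2 * top_probs[0, 1]
print('tas mixed feature  :', out.shape)

Both paths keep the forward pass differentiable with respect to alpha, which is
why a plain Adam optimizer over `search_model.alphas` suffices in search-size.py.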