diff --git a/exps/algos-v2/search-cell.py b/exps/algos-v2/search-cell.py
index 568eae9..e1ae220 100644
--- a/exps/algos-v2/search-cell.py
+++ b/exps/algos-v2/search-cell.py
@@ -4,6 +4,14 @@
 # python ./exps/algos-v2/search-cell.py --dataset cifar10 --data_path $TORCH_HOME/cifar.python --algo darts-v1 --rand_seed 1
 # python ./exps/algos-v2/search-cell.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo darts-v1
 # python ./exps/algos-v2/search-cell.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo darts-v1
+####
+# python ./exps/algos-v2/search-cell.py --dataset cifar10 --data_path $TORCH_HOME/cifar.python --algo darts-v2 --rand_seed 1
+# python ./exps/algos-v2/search-cell.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo darts-v2
+# python ./exps/algos-v2/search-cell.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo darts-v2
+####
+# python ./exps/algos-v2/search-cell.py --dataset cifar10 --data_path $TORCH_HOME/cifar.python --algo gdas --rand_seed 1
+# python ./exps/algos-v2/search-cell.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo gdas
+# python ./exps/algos-v2/search-cell.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo gdas
 ######################################################################################
 import os, sys, time, random, argparse
 import numpy as np
@@ -22,7 +30,7 @@ from models import get_cell_based_tiny_net, get_search_spaces
 from nas_201_api import NASBench201API as API


-def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
+def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger):
   data_time, batch_time = AverageMeter(), AverageMeter()
   base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
   arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
@@ -30,15 +38,26 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
   network.train()
   for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
     scheduler.update(None, 1.0 * step / len(xloader))
+    base_inputs = base_inputs.cuda(non_blocking=True)
+    arch_inputs = arch_inputs.cuda(non_blocking=True)
     base_targets = base_targets.cuda(non_blocking=True)
     arch_targets = arch_targets.cuda(non_blocking=True)
     # measure data loading time
     data_time.update(time.time() - end)

-    # update the weights
-    sampled_arch = network.module.dync_genotype(True)
-    network.module.set_cal_mode('dynamic', sampled_arch)
-    #network.module.set_cal_mode( 'urs' )
+    # Update the weights
+    if algo == 'setn':
+      sampled_arch = network.dync_genotype(True)
+      network.set_cal_mode('dynamic', sampled_arch)
+    elif algo == 'gdas':
+      network.set_cal_mode('gdas', None)
+    elif algo.startswith('darts'):
+      network.set_cal_mode('joint', None)
+    elif algo == 'random':
+      network.set_cal_mode('urs', None)
+    else:
+      raise ValueError('Invalid algo name : {:}'.format(algo))
+
     network.zero_grad()
     _, logits = network(base_inputs)
     base_loss = criterion(logits, base_targets)
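
The per-algorithm if/elif ladder above reappears almost verbatim for the architecture step in the next hunk and again inside main(). A minimal sketch of how it could be factored into a single helper, assuming only the set_cal_mode interface used in this patch (the name apply_cal_mode is hypothetical, not part of the change):

def apply_cal_mode(network, algo, genotype=None):
  # Hypothetical helper: map the search algorithm onto the super-network calculation mode.
  if algo == 'setn':
    network.set_cal_mode('dynamic', genotype)  # run one concrete sampled cell
  elif algo == 'gdas':
    network.set_cal_mode('gdas', None)         # differentiable sampling of one op per edge
  elif algo.startswith('darts'):
    network.set_cal_mode('joint', None)        # softmax-weighted mixture of candidate ops
  elif algo == 'random':
    network.set_cal_mode('urs', None)          # uniform random sampling
  else:
    raise ValueError('Invalid algo name : {:}'.format(algo))
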
@@ -51,7 +70,16 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
     base_top5.update (base_prec5.item(), base_inputs.size(0))

     # update the architecture-weight
-    network.module.set_cal_mode( 'joint' )
+    if algo == 'setn':
+      network.set_cal_mode('joint')
+    elif algo == 'gdas':
+      network.set_cal_mode('gdas', None)
+    elif algo.startswith('darts'):
+      network.set_cal_mode('joint', None)
+    elif algo == 'random':
+      network.set_cal_mode('urs', None)
+    else:
+      raise ValueError('Invalid algo name : {:}'.format(algo))
     network.zero_grad()
     _, logits = network(arch_inputs)
     arch_loss = criterion(logits, arch_targets)
@@ -73,36 +101,38 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
       Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
       Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
       logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
-      #print (nn.functional.softmax(network.module.arch_parameters, dim=-1))
-      #print (network.module.arch_parameters)
   return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg


-def get_best_arch(xloader, network, n_samples):
+def get_best_arch(xloader, network, n_samples, algo):
   with torch.no_grad():
     network.eval()
-    archs, valid_accs = network.module.return_topK(n_samples), []
-    #print ('obtain the top-{:} architectures'.format(n_samples))
+    if algo == 'random':
+      archs, valid_accs = network.return_topK(n_samples, True), []
+    elif algo == 'setn':
+      archs, valid_accs = network.return_topK(n_samples, False), []
+    elif algo.startswith('darts') or algo == 'gdas':
+      arch = network.genotype
+      archs, valid_accs = [arch], []
+    else:
+      raise ValueError('Invalid algorithm name : {:}'.format(algo))
     loader_iter = iter(xloader)
     for i, sampled_arch in enumerate(archs):
-      network.module.set_cal_mode('dynamic', sampled_arch)
+      network.set_cal_mode('dynamic', sampled_arch)
       try:
         inputs, targets = next(loader_iter)
       except:
         loader_iter = iter(xloader)
         inputs, targets = next(loader_iter)
-
-      _, logits = network(inputs)
+      _, logits = network(inputs.cuda(non_blocking=True))
       val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))
-
       valid_accs.append(val_top1.item())
-
     best_idx = np.argmax(valid_accs)
     best_arch, best_valid_acc = archs[best_idx], valid_accs[best_idx]
     return best_arch, best_valid_acc


-def valid_func(xloader, network, criterion):
+def valid_func(xloader, network, criterion, algo, logger):
   data_time, batch_time = AverageMeter(), AverageMeter()
   arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
   end = time.time()
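
With the change above, get_best_arch scores every candidate on a single validation batch and keeps the np.argmax winner: 'random' draws n_samples cells uniformly, 'setn' ranks cells by the learned architecture distribution, and the DARTS/GDAS branches evaluate only their current argmax genotype. A hedged usage sketch (valid_loader and network are assumed to be built as in main(); 100 is just an arbitrary candidate count):

best_arch, best_acc = get_best_arch(valid_loader, network, 100, 'setn')
network.set_cal_mode('dynamic', best_arch)  # then report accuracy for exactly this cell
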
@@ -113,7 +143,7 @@ def valid_func(xloader, network, criterion):
       # measure data loading time
       data_time.update(time.time() - end)
       # prediction
-      _, logits = network(arch_inputs)
+      _, logits = network(arch_inputs.cuda(non_blocking=True))
       arch_loss = criterion(logits, arch_targets)
       # record
       arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
@@ -166,7 +196,6 @@ def main(xargs):
   logger.log('{:} create API = {:} done'.format(time_string(), api))

   last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
-  # network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
   network, criterion = search_model.cuda(), criterion.cuda()  # use a single GPU

   last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
@@ -185,7 +214,7 @@
     logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
   else:
     logger.log("=> do not find the last-info file : {:}".format(last_info))
-    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {}
+    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: network.return_topK(1, True)[0]}

   # start training
   start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
@@ -195,28 +224,25 @@
     epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
     logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))

-    import pdb; pdb.set_trace()
-
     search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 \
-              = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)
+              = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, xargs.algo, logger)
     search_time.update(time.time() - start_time)
     logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
     logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))

-    genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
-    network.module.set_cal_mode('dynamic', genotype)
-    valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
+    genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.eval_candidate_num, xargs.algo)
+    if xargs.algo == 'setn':
+      network.set_cal_mode('dynamic', genotype)
+    elif xargs.algo == 'gdas':
+      network.set_cal_mode('gdas', None)
+    elif xargs.algo.startswith('darts'):
+      network.set_cal_mode('joint', None)
+    elif xargs.algo == 'random':
+      network.set_cal_mode('urs', None)
+    else:
+      raise ValueError('Invalid algorithm name : {:}'.format(xargs.algo))
+    valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion, xargs.algo, logger)
     logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
-    #search_model.set_cal_mode('urs')
-    #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
-    #logger.log('[{:}] URS---evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
-    #search_model.set_cal_mode('joint')
-    #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
-    #logger.log('[{:}] JOINT-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
-    #search_model.set_cal_mode('select')
-    #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
-    #logger.log('[{:}] Selec-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
-
     # check the best accuracy
     valid_accuracies[epoch] = valid_a_top1
     genotypes[epoch] = genotype
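
When xargs.algo is 'gdas', the calculation mode chosen above is expected to make the super-network draw one operation per edge with a hard Gumbel-Softmax at temperature tau, following the GDAS paper cited in cell_operations.py. A self-contained sketch of that sampling step (an assumption about the mode's behaviour, not the repository's exact implementation):

import torch
import torch.nn.functional as F

def gdas_edge_sample(edge_logits, tau):
  # One-hot sample in the forward pass, softmax gradients in the backward pass.
  return F.gumbel_softmax(edge_logits, tau=tau, hard=True)

weights = gdas_edge_sample(torch.randn(5), tau=10.0)  # 5 candidate operations on one edge
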
@@ -245,15 +271,25 @@

   # the final post procedure : count the time
   start_time = time.time()
-  genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
+  genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.eval_candidate_num, xargs.algo)
+  if xargs.algo == 'setn':
+    network.set_cal_mode('dynamic', genotype)
+  elif xargs.algo == 'gdas':
+    network.set_cal_mode('gdas', None)
+  elif xargs.algo.startswith('darts'):
+    network.set_cal_mode('joint', None)
+  elif xargs.algo == 'random':
+    network.set_cal_mode('urs', None)
+  else:
+    raise ValueError('Invalid algorithm name : {:}'.format(xargs.algo))
   search_time.update(time.time() - start_time)
-  network.module.set_cal_mode('dynamic', genotype)
-  valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
+
+  valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion, xargs.algo, logger)
   logger.log('Last : the gentotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))
   logger.log('\n' + '-'*100)

   # check the performance from the architecture dataset
-  logger.log('SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, genotype))
+  logger.log('[{:}] run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(xargs.algo, total_epoch, search_time.sum, genotype))
   if api is not None: logger.log('{:}'.format(api.query_by_arch(genotype, '200') ))
   logger.close()

@@ -281,7 +317,7 @@ if __name__ == '__main__':
   # log
   parser.add_argument('--workers', type=int, default=2, help='number of data loading workers (default: 2)')
   parser.add_argument('--save_dir', type=str, default='./output/search', help='Folder to save checkpoints and log.')
-  parser.add_argument('--print_freq', type=int, help='print frequency (default: 200)')
+  parser.add_argument('--print_freq', type=int, default=200, help='print frequency (default: 200)')
   parser.add_argument('--rand_seed', type=int, help='manual seed')
   args = parser.parse_args()
   if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)
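
The api.query_by_arch(genotype, '200') call above reports the searched genotype's benchmark statistics. A hedged standalone sketch of that lookup, where the benchmark file name is illustrative and genotype is the Structure returned by get_best_arch:

import os
from nas_201_api import NASBench201API as API

api = API(os.path.join(os.environ['TORCH_HOME'], 'NAS-Bench-201-v1_1-096897.pth'))
print(api.query_by_arch(genotype, '200'))  # train/valid/test statistics under the 200-epoch setting
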
diff --git a/lib/models/cell_operations.py b/lib/models/cell_operations.py
index ce1c258..cb809ca 100644
--- a/lib/models/cell_operations.py
+++ b/lib/models/cell_operations.py
@@ -242,6 +242,16 @@ class PartAwareOp(nn.Module):
     return outputs


+def drop_path(x, drop_prob):
+  if drop_prob > 0.:
+    keep_prob = 1. - drop_prob
+    mask = x.new_zeros(x.size(0), 1, 1, 1)
+    mask = mask.bernoulli_(keep_prob)
+    x = torch.div(x, keep_prob)
+    x.mul_(mask)
+  return x
+
+
 # Searching for A Robust Neural Architecture in Four GPU Hours
 class GDAS_Reduction_Cell(nn.Module):
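
The new drop_path zeroes whole samples with probability drop_prob and rescales the survivors by 1/keep_prob, so the expected activation stays unchanged. A quick sanity-check sketch, assuming lib is on sys.path as in the repository's scripts:

import torch
from models.cell_operations import drop_path

x = torch.ones(1000, 8, 4, 4)
y = drop_path(x, 0.2)
print((y.sum(dim=(1, 2, 3)) > 0).float().mean())  # ~0.8: fraction of samples kept
print(y.mean().item())                            # ~1.0: survivors are scaled by 1/0.8
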
diff --git a/lib/models/cell_searchs/generic_model.py b/lib/models/cell_searchs/generic_model.py
index 5b437cb..908f6fa 100644
--- a/lib/models/cell_searchs/generic_model.py
+++ b/lib/models/cell_searchs/generic_model.py
@@ -6,7 +6,7 @@ import torch.nn as nn
 from copy import deepcopy
 from typing import Text

-from ..cell_operations import ResNetBasicblock
+from ..cell_operations import ResNetBasicblock, drop_path
 from .search_cells import NAS201SearchCell as SearchCell
 from .genotypes import Structure
 from .search_model_enas_utils import Controller
@@ -48,6 +48,7 @@ class GenericNAS201Model(nn.Module):
     self.dynamic_cell = None
     self._tau = None
     self._algo = None
+    self._drop_path = None

   def set_algo(self, algo: Text):
     # used for searching
@@ -62,7 +63,7 @@

   def set_cal_mode(self, mode, dynamic_cell=None):
     assert mode in ['gdas', 'enas', 'urs', 'joint', 'select', 'dynamic']
-    self.mode = mode
+    self._mode = mode
     if mode == 'dynamic': self.dynamic_cell = deepcopy(dynamic_cell)
     else : self.dynamic_cell = None

@@ -70,6 +71,10 @@
   def mode(self):
     return self._mode

+  @property
+  def drop_path(self):
+    return self._drop_path
+
   @property
   def weights(self):
     xlist = list(self._stem.parameters())
@@ -100,6 +105,15 @@
       string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self._cells), cell.extra_repr())
     return string

+  def show_alphas(self):
+    with torch.no_grad():
+      if self._algo == 'enas':
+        import pdb; pdb.set_trace()
+        print('-')
+      else:
+        return 'arch-parameters :\n{:}'.format( nn.functional.softmax(self.arch_parameters, dim=-1).cpu() )
+
+
   def extra_repr(self):
     return ('{name}(C={_C}, Max-Nodes={_max_nodes}, N={_layerN}, L={_Layer}, alg={_algo})'.format(name=self.__class__.__name__, **self.__dict__))

@@ -112,7 +126,7 @@
         node_str = '{:}<-{:}'.format(i, j)
         with torch.no_grad():
           weights = self.arch_parameters[ self.edge2index[node_str] ]
-          op_name = self.op_names[ weights.argmax().item() ]
+          op_name = self._op_names[ weights.argmax().item() ]
         xlist.append((op_name, j))
       genotypes.append(tuple(xlist))
     return Structure(genotypes)
@@ -126,11 +140,11 @@
       for j in range(i):
         node_str = '{:}<-{:}'.format(i, j)
         if use_random:
-          op_name = random.choice(self.op_names)
+          op_name = random.choice(self._op_names)
         else:
           weights = alphas_cpu[ self.edge2index[node_str] ]
           op_index = torch.multinomial(weights, 1).item()
-          op_name = self.op_names[ op_index ]
+          op_name = self._op_names[ op_index ]
         xlist.append((op_name, j))
       genotypes.append(tuple(xlist))
     return Structure(genotypes)
@@ -142,17 +156,20 @@
     for i, node_info in enumerate(arch.nodes):
       for op, xin in node_info:
         node_str = '{:}<-{:}'.format(i+1, xin)
-        op_index = self.op_names.index(op)
+        op_index = self._op_names.index(op)
         select_logits.append( logits[self.edge2index[node_str], op_index] )
     return sum(select_logits).item()

-  def return_topK(self, K):
-    archs = Structure.gen_all(self.op_names, self._max_nodes, False)
+  def return_topK(self, K, use_random=False):
+    archs = Structure.gen_all(self._op_names, self._max_nodes, False)
     pairs = [(self.get_log_prob(arch), arch) for arch in archs]
     if K < 0 or K >= len(archs): K = len(archs)
-    sorted_pairs = sorted(pairs, key=lambda x: -x[0])
-    return_pairs = [sorted_pairs[_][1] for _ in range(K)]
-    return return_pairs
+    if use_random:
+      return random.sample(archs, K)
+    else:
+      sorted_pairs = sorted(pairs, key=lambda x: -x[0])
+      return_pairs = [sorted_pairs[_][1] for _ in range(K)]
+      return return_pairs

   def normalize_archp(self):
     if self.mode == 'gdas':