D-X-Y 2019-11-10 03:04:05 +11:00
parent fac556c176
commit 54ecec7f75
4 changed files with 2 additions and 164 deletions

View File

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2019 Xuanyi Dong
+Copyright (c) 2019 Xuanyi Dong [GitHub: https://github.com/D-X-Y]
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View File

@@ -10,7 +10,7 @@ This project contains the following neural architecture search algorithms, imple
 ## Requirements and Preparation
-Please install `PyTorch>=1.0.1`, `Python>=3.6`, and `opencv`.
+Please install `PyTorch>=1.1.0`, `Python>=3.6`, and `opencv`.
 The CIFAR and ImageNet datasets should be downloaded and extracted into `$TORCH_HOME`.
 Some methods use knowledge distillation (KD), which requires pre-trained models. Please download these models from [Google Drive](https://drive.google.com/open?id=1ANmiYEGX-IQZTfH8w0aSpj-Wypg-0DR-) (or train them yourself) and save them into `.latent-data`.
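For reference, a downloaded teacher checkpoint in `.latent-data` would typically be restored with `torch.load`; a minimal sketch (the file name `teacher.pth` is a hypothetical placeholder, not the actual checkpoint name):

```python
import torch
from pathlib import Path

# Hypothetical file name; substitute the checkpoint actually downloaded
# from the Google Drive link above.
ckpt_path = Path('.latent-data') / 'teacher.pth'
checkpoint = torch.load(ckpt_path, map_location='cpu')
```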

View File

@@ -1,38 +0,0 @@
import os, sys, time, queue, torch
from pathlib import Path

# make the project's `lib` directory importable
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path:
    sys.path.insert(0, str(lib_dir))
from log_utils import time_string
from models import CellStructure


def get_unique_matrix(archs, consider_zero):
    # sm_matrix[i, j] is True when architectures i and j share the same unique string
    UniquStrs = [arch.to_unique_str(consider_zero) for arch in archs]
    print('{:} create unique-string done'.format(time_string()))
    sm_matrix = torch.eye(len(archs)).bool()
    for i, _ in enumerate(UniquStrs):
        for j in range(i):
            sm_matrix[i, j] = sm_matrix[j, i] = UniquStrs[i] == UniquStrs[j]
    # assign one id per group of mutually-identical architectures
    unique_ids, unique_num = [-1 for _ in archs], 0
    for i in range(len(unique_ids)):
        if unique_ids[i] > -1:
            continue
        neighbours = sm_matrix[i].nonzero().view(-1).tolist()
        for nghb in neighbours:
            assert unique_ids[nghb] == -1, 'impossible'
            unique_ids[nghb] = unique_num
        unique_num += 1
    return sm_matrix, unique_ids, unique_num


def check_unique_arch():
    print('{:} start'.format(time_string()))
    meta_info = torch.load('./output/AA-NAS-BENCH-4/meta-node-4.pth')
    arch_strs = meta_info['archs']
    archs = [CellStructure.str2structure(arch_str) for arch_str in arch_strs]
    # count unique architectures, first without and then with zero-op collapsing
    _, _, unique_num = get_unique_matrix(archs, False)
    print('{:} There are {:} unique architectures (not considering zero).'.format(time_string(), unique_num))
    _, _, unique_num = get_unique_matrix(archs, True)
    print('{:} There are {:} unique architectures (considering zero).'.format(time_string(), unique_num))


if __name__ == '__main__':
    check_unique_arch()
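For intuition, the grouping logic in `get_unique_matrix` can be exercised with plain strings standing in for architectures; a toy sketch in which the string itself plays the role of `to_unique_str`:

```python
import torch

def toy_unique(strs):
    # same pairwise-similarity + group-id assignment as get_unique_matrix
    n = len(strs)
    sm = torch.eye(n).bool()
    for i in range(n):
        for j in range(i):
            sm[i, j] = sm[j, i] = (strs[i] == strs[j])
    ids, num = [-1] * n, 0
    for i in range(n):
        if ids[i] > -1:
            continue
        for k in sm[i].nonzero().view(-1).tolist():
            ids[k] = num
        num += 1
    return ids, num

print(toy_unique(['a', 'b', 'a', 'c']))  # -> ([0, 1, 0, 2], 3)
```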

View File

@@ -1,124 +0,0 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time, torch
from procedures import prepare_seed, get_optim_scheduler
from utils import get_model_infos, obtain_accuracy
from config_utils import dict2config
from log_utils import AverageMeter, time_string, convert_secs2time
from models import get_cell_based_tiny_net
__all__ = ['evaluate_for_seed', 'pure_evaluate']


def pure_evaluate(xloader, network, criterion=torch.nn.CrossEntropyLoss()):
    data_time, batch_time, batch = AverageMeter(), AverageMeter(), None
    losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
    latencies = []
    network.eval()
    with torch.no_grad():
        end = time.time()
        for i, (inputs, targets) in enumerate(xloader):
            targets = targets.cuda(non_blocking=True)
            inputs = inputs.cuda(non_blocking=True)
            data_time.update(time.time() - end)
            # forward
            features, logits = network(inputs)
            loss = criterion(logits, targets)
            batch_time.update(time.time() - end)
            # track latency only for full-size batches (the last batch may be smaller)
            if batch is None or batch == inputs.size(0):
                batch = inputs.size(0)
                latencies.append(batch_time.val - data_time.val)
            # record loss and accuracy
            prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            end = time.time()
    if len(latencies) > 2:
        latencies = latencies[1:]  # drop the first (warm-up) measurement
    return losses.avg, top1.avg, top5.avg, latencies


def procedure(xloader, network, criterion, scheduler, optimizer, mode):
    losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
    if mode == 'train':
        network.train()
    elif mode == 'valid':
        network.eval()
    else:
        raise ValueError('The mode is not right : {:}'.format(mode))
    for i, (inputs, targets) in enumerate(xloader):
        if mode == 'train':
            scheduler.update(None, 1.0 * i / len(xloader))
        # inputs stay on CPU here; DataParallel scatters them to the GPUs
        targets = targets.cuda(non_blocking=True)
        if mode == 'train':
            optimizer.zero_grad()
        # forward
        features, logits = network(inputs)
        loss = criterion(logits, targets)
        # backward
        if mode == 'train':
            loss.backward()
            optimizer.step()
        # record loss and accuracy
        prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
    return losses.avg, top1.avg, top5.avg


def evaluate_for_seed(arch_config, config, arch, train_loader, valid_loader, seed, logger):
    prepare_seed(seed)  # fix the random seed for reproducibility
    net = get_cell_based_tiny_net(
        dict2config({'name': 'infer.tiny',
                     'C': arch_config['channel'],
                     'N': arch_config['num_cells'],
                     'genotype': arch,
                     'num_classes': config.class_num}, None))
    flop, param = get_model_infos(net, config.xshape)
    logger.log('Network : {:}'.format(net.get_message()), False)
    logger.log('Seed-------------------------- {:} --------------------------'.format(seed))
    logger.log('FLOP = {:} MB, Param = {:} MB'.format(flop, param))
    # optimizer, learning-rate scheduler, and loss
    optimizer, scheduler, criterion = get_optim_scheduler(net.parameters(), config)
    network, criterion = torch.nn.DataParallel(net).cuda(), criterion.cuda()
    # start training
    start_time, epoch_time, total_epoch = time.time(), AverageMeter(), config.epochs + config.warmup
    train_losses, train_acc1es, train_acc5es = {}, {}, {}
    valid_losses, valid_acc1es, valid_acc5es = {}, {}, {}
    for epoch in range(total_epoch):
        scheduler.update(epoch, 0.0)
        train_loss, train_acc1, train_acc5 = procedure(train_loader, network, criterion, scheduler, optimizer, 'train')
        with torch.no_grad():
            valid_loss, valid_acc1, valid_acc5 = procedure(valid_loader, network, criterion, None, None, 'valid')
        train_losses[epoch], train_acc1es[epoch], train_acc5es[epoch] = train_loss, train_acc1, train_acc5
        valid_losses[epoch], valid_acc1es[epoch], valid_acc5es[epoch] = valid_loss, valid_acc1, valid_acc5
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
        need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.avg * (total_epoch - epoch - 1), True))
        logger.log('{:} {:} epoch={:03d}/{:03d} :: Train [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%] Valid [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%]'.format(
            time_string(), need_time, epoch, total_epoch, train_loss, train_acc1, train_acc5, valid_loss, valid_acc1, valid_acc5))
    info_seed = {'flop': flop,
                 'param': param,
                 'channel': arch_config['channel'],
                 'num_cells': arch_config['num_cells'],
                 'config': config._asdict(),
                 'total_epoch': total_epoch,
                 'train_losses': train_losses,
                 'train_acc1es': train_acc1es,
                 'train_acc5es': train_acc5es,
                 'valid_losses': valid_losses,
                 'valid_acc1es': valid_acc1es,
                 'valid_acc5es': valid_acc5es,
                 'net_state_dict': net.state_dict(),
                 'net_string': '{:}'.format(net),
                 'finish-train': True}
    return info_seed
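As a usage illustration, `pure_evaluate` above could be driven like this (a sketch, assuming a CUDA device and that the function is importable; `ToyNet` is a hypothetical stand-in for any model whose forward returns `(features, logits)`):

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

class ToyNet(nn.Module):
    """Hypothetical model obeying the (features, logits) forward contract."""
    def __init__(self, num_classes=10):
        super().__init__()
        self.body = nn.Linear(32, 64)
        self.head = nn.Linear(64, num_classes)

    def forward(self, x):
        features = self.body(x)
        return features, self.head(features)

loader = DataLoader(
    TensorDataset(torch.randn(256, 32), torch.randint(0, 10, (256,))),
    batch_size=64)
if torch.cuda.is_available():
    loss_avg, top1, top5, latencies = pure_evaluate(loader, ToyNet().cuda())
```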