Add support for the Oxford and Aircraft datasets
Some checks failed
Test Spaces / build (macos-latest, 3.6) (push) Has been cancelled
Test Spaces / build (macos-latest, 3.7) (push) Has been cancelled
Test Spaces / build (macos-latest, 3.8) (push) Has been cancelled
Test Spaces / build (macos-latest, 3.9) (push) Has been cancelled
Test Spaces / build (ubuntu-18.04, 3.6) (push) Has been cancelled
Test Spaces / build (ubuntu-18.04, 3.7) (push) Has been cancelled
Test Spaces / build (ubuntu-18.04, 3.8) (push) Has been cancelled
Test Spaces / build (ubuntu-18.04, 3.9) (push) Has been cancelled
Test Spaces / build (ubuntu-20.04, 3.6) (push) Has been cancelled
Test Spaces / build (ubuntu-20.04, 3.7) (push) Has been cancelled
Test Spaces / build (ubuntu-20.04, 3.8) (push) Has been cancelled
Test Spaces / build (ubuntu-20.04, 3.9) (push) Has been cancelled
Test Xmisc / build (macos-latest, 3.6) (push) Has been cancelled
Test Xmisc / build (macos-latest, 3.7) (push) Has been cancelled
Test Xmisc / build (macos-latest, 3.8) (push) Has been cancelled
Test Xmisc / build (macos-latest, 3.9) (push) Has been cancelled
Test Xmisc / build (ubuntu-18.04, 3.6) (push) Has been cancelled
Test Xmisc / build (ubuntu-18.04, 3.7) (push) Has been cancelled
Test Xmisc / build (ubuntu-18.04, 3.8) (push) Has been cancelled
Test Xmisc / build (ubuntu-18.04, 3.9) (push) Has been cancelled
Test Xmisc / build (ubuntu-20.04, 3.6) (push) Has been cancelled
Test Xmisc / build (ubuntu-20.04, 3.7) (push) Has been cancelled
Test Xmisc / build (ubuntu-20.04, 3.8) (push) Has been cancelled
Test Xmisc / build (ubuntu-20.04, 3.9) (push) Has been cancelled
Test Super Model / build (macos-latest, 3.6) (push) Has been cancelled
Test Super Model / build (macos-latest, 3.7) (push) Has been cancelled
Test Super Model / build (macos-latest, 3.8) (push) Has been cancelled
Test Super Model / build (macos-latest, 3.9) (push) Has been cancelled
Test Super Model / build (ubuntu-18.04, 3.6) (push) Has been cancelled
Test Super Model / build (ubuntu-18.04, 3.7) (push) Has been cancelled
Test Super Model / build (ubuntu-18.04, 3.8) (push) Has been cancelled
Test Super Model / build (ubuntu-18.04, 3.9) (push) Has been cancelled
Test Super Model / build (ubuntu-20.04, 3.6) (push) Has been cancelled
Test Super Model / build (ubuntu-20.04, 3.7) (push) Has been cancelled
Test Super Model / build (ubuntu-20.04, 3.8) (push) Has been cancelled
Test Super Model / build (ubuntu-20.04, 3.9) (push) Has been cancelled

This commit is contained in:
xmuhanma
2024-12-19 12:40:36 +01:00
parent 889bd1974c
commit 4612cd198b
3 changed files with 325 additions and 272 deletions

View File

@@ -20,7 +20,92 @@ from functions import evaluate_for_seed
from torchvision import datasets, transforms
def evaluate_all_datasets(
# Path to the NAS-Bench-201 benchmark config directory (LESS.config,
# {dataset}.config, {dataset}-split.txt, {dataset}-test-split.txt).
# NOTE(review): hard-coded, machine-specific absolute path to a Lustre
# workspace — breaks on any other machine. Consider restoring the
# relative form below (or reading from an env var / CLI flag):
# NASBENCH201_CONFIG_PATH = os.path.join( os.getcwd(), 'main_exp', 'transfer_nag')
NASBENCH201_CONFIG_PATH = '/lustre/hpe/ws11/ws11.1/ws/xmuhanma-nbdit/autodl-projects/configs/nas-benchmark'
def evaluate_all_datasets(arch, datasets, xpaths, splits, use_less, seed,
                          arch_config, workers, logger):
    """Train/evaluate one architecture on every dataset in *datasets*.

    For each (dataset, xpath, split) triple this builds the data loaders,
    loads the matching NAS-Bench-201 training config, runs
    ``evaluate_for_seed`` and collects its results keyed by dataset name.

    Args:
        arch: architecture description passed through to ``evaluate_for_seed``.
        datasets: iterable of dataset names; only 'mnist', 'svhn',
            'aircraft' and 'oxford' are accepted (anything else raises).
        xpaths: iterable of dataset root paths, aligned with *datasets*.
        splits: iterable of per-dataset split flags, aligned with *datasets*
            (truthy -> the result key gets a '-valid' suffix).
        use_less: if truthy, use the reduced 'LESS.config' training schedule
            instead of the per-dataset config.
        seed: random seed forwarded to ``evaluate_for_seed``.
        arch_config: architecture config; deep-copied so the caller's copy
            is never mutated.
        workers: number of DataLoader worker processes.
        logger: project logger with a ``.log(str)`` method.

    Returns:
        dict with one entry per evaluated dataset key, plus 'info'
        (machine info) and 'all_dataset_keys' (list of the dataset keys).
    """
    machine_info, arch_config = get_machine_info(), deepcopy(arch_config)
    all_infos = {'info': machine_info}
    all_dataset_keys = []
    # look all the datasets
    for dataset, xpath, split in zip(datasets, xpaths, splits):
        # train valid data
        task = None
        train_data, valid_data, xshape, class_num = get_datasets(
            dataset, xpath, -1, task)
        # load the configuration
        if dataset in ['mnist', 'svhn', 'aircraft', 'oxford']:
            if use_less:
                # reduced-epoch schedule shared by all datasets
                config_path = os.path.join(
                    NASBENCH201_CONFIG_PATH, 'LESS.config')
            else:
                config_path = os.path.join(
                    NASBENCH201_CONFIG_PATH, '{}.config'.format(dataset))
            p = os.path.join(
                NASBENCH201_CONFIG_PATH, '{:}-split.txt'.format(dataset))
            if not os.path.exists(p):
                # First run for this dataset: generate a 50/50 train/valid
                # index split and persist it so later runs reuse it.
                # NOTE(review): indices are shuffled with the global `random`
                # state, so the generated split depends on when this runs —
                # confirm the caller seeds `random` beforehand.
                import json
                label_list = list(range(len(train_data)))
                random.shuffle(label_list)
                strlist = [str(label_list[i]) for i in range(len(label_list))]
                # "int" is presumably a type tag understood by load_config —
                # TODO confirm it converts the string indices back to ints.
                splited = {'train': ["int", strlist[:len(train_data) // 2]],
                           'valid': ["int", strlist[len(train_data) // 2:]]}
                with open(p, 'w') as f:
                    f.write(json.dumps(splited))
            split_info = load_config(os.path.join(
                NASBENCH201_CONFIG_PATH, '{:}-split.txt'.format(dataset)), None, None)
        else:
            raise ValueError('invalid dataset : {:}'.format(dataset))
        config = load_config(
            config_path, {'class_num': class_num, 'xshape': xshape}, logger)
        # data loader
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size,
                                                   shuffle=True, num_workers=workers, pin_memory=True)
        valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size,
                                                   shuffle=False, num_workers=workers, pin_memory=True)
        # NOTE(review): this rebinds the `splits` PARAMETER with the
        # test-split config. The loop itself is unaffected (zip() already
        # captured the original object), but the shadowing is confusing —
        # consider renaming this local (e.g. `test_split`).
        # NOTE(review): `split_info` loaded above is never used here; the
        # samplers below read from this test-split file instead — verify
        # that is intentional.
        splits = load_config(os.path.join(
            NASBENCH201_CONFIG_PATH, '{}-test-split.txt'.format(dataset)), None, None)
        # Three evaluation loaders: the full test set plus the x-valid /
        # x-test index subsets defined by the test-split file.
        ValLoaders = {'ori-test': valid_loader,
                      'x-valid': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size,
                                                             sampler=torch.utils.data.sampler.SubsetRandomSampler(
                                                                 splits.xvalid),
                                                             num_workers=workers, pin_memory=True),
                      'x-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size,
                                                            sampler=torch.utils.data.sampler.SubsetRandomSampler(
                                                                splits.xtest),
                                                            num_workers=workers, pin_memory=True)
                      }
        dataset_key = '{:}'.format(dataset)
        if bool(split):
            dataset_key = dataset_key + '-valid'
        logger.log(
            'Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.
            format(dataset_key, len(train_data), len(valid_data), len(train_loader), len(valid_loader), config.batch_size))
        logger.log('Evaluate ||||||| {:10s} ||||||| Config={:}'.format(
            dataset_key, config))
        for key, value in ValLoaders.items():
            logger.log(
                'Evaluate ---->>>> {:10s} with {:} batchs'.format(key, len(value)))
        # Run the actual training/evaluation for this dataset.
        results = evaluate_for_seed(
            arch_config, config, arch, train_loader, ValLoaders, seed, logger)
        all_infos[dataset_key] = results
        all_dataset_keys.append(dataset_key)
    all_infos['all_dataset_keys'] = all_dataset_keys
    return all_infos
def evaluate_all_datasets1(
arch, datasets, xpaths, splits, use_less, seed, arch_config, workers, logger
):
machine_info, arch_config = get_machine_info(), deepcopy(arch_config)
@@ -55,7 +140,14 @@ def evaluate_all_datasets(
split_info = load_config(
"configs/nas-benchmark/{:}-split.txt".format(dataset), None, None
)
elif dataset.startswith("oxford"):
if use_less:
config_path = "configs/nas-benchmark/LESS.config"
else:
config_path = "configs/nas-benchmark/oxford.config"
split_info = load_config(
"configs/nas-benchmark/{:}-split.txt".format(dataset), None, None
)
else:
raise ValueError("invalid dataset : {:}".format(dataset))
config = load_config(
@@ -126,6 +218,31 @@ def evaluate_all_datasets(
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid),
num_workers=workers,
pin_memory=True)
elif dataset == "oxford":
ValLoaders = {
"ori-test": torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
shuffle=False,
num_workers=workers,
pin_memory=True
)
}
# train_data_v2 = deepcopy(train_data)
# train_data_v2.transform = valid_data.transform
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train),
num_workers=workers,
pin_memory=True)
valid_loader = torch.utils.data.DataLoader(
valid_data,
batch_size=config.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid),
num_workers=workers,
pin_memory=True)
else:
# data loader
train_loader = torch.utils.data.DataLoader(
@@ -142,7 +259,7 @@ def evaluate_all_datasets(
num_workers=workers,
pin_memory=True,
)
if dataset == "cifar10" or dataset == "aircraft":
if dataset == "cifar10" or dataset == "aircraft" or dataset == "oxford":
ValLoaders = {"ori-test": valid_loader}
elif dataset == "cifar100":
cifar100_splits = load_config(