##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021 #
##################################################
from __future__ import division
from __future__ import print_function

import os, math, random
from collections import OrderedDict
import numpy as np
import pandas as pd
import copy
from functools import partial
from typing import Optional, Text

from qlib.utils import (
    unpack_archive_with_buffer,
    save_multiple_parts_file,
    get_or_create_path,
    drop_nan_by_y_index,
)
from qlib.log import get_module_logger

import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as th_data

from log_utils import AverageMeter
from utils import count_parameters

from trade_models.transformers import DEFAULT_NET_CONFIG
from trade_models.transformers import get_transformer

from qlib.model.base import Model
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP

DEFAULT_OPT_CONFIG = dict(
    epochs=200, lr=0.001, batch_size=2000, early_stop=20, loss="mse", optimizer="adam", num_workers=4
)
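

# How this model is typically driven, end to end (a minimal sketch, not executed
# here; `dataset` is assumed to be a qlib DatasetH with "train"/"valid"/"test"
# segments built by the surrounding workflow config):
#
#     model = QuantTransformer(GPU=0, seed=42)
#     model.fit(dataset)                    # trains with early stopping on the valid split
#     predictions = model.predict(dataset)  # pd.Series indexed like the test features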


class QuantTransformer(Model):
    """Transformer-based Quant Model"""

    def __init__(self, net_config=None, opt_config=None, metric="", GPU=0, seed=None, **kwargs):
        # Set logger.
        self.logger = get_module_logger("QuantTransformer")
        self.logger.info("QuantTransformer PyTorch version...")

        # set hyper-parameters.
        self.net_config = net_config or DEFAULT_NET_CONFIG
        self.opt_config = opt_config or DEFAULT_OPT_CONFIG
        self.metric = metric
        self.device = torch.device("cuda:{:}".format(GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
        self.seed = seed

        self.logger.info(
            "Transformer parameters setting:"
            "\nnet_config : {:}"
            "\nopt_config : {:}"
            "\nmetric : {:}"
            "\ndevice : {:}"
            "\nseed : {:}".format(
                self.net_config,
                self.opt_config,
                self.metric,
                self.device,
                self.seed,
            )
        )

        if self.seed is not None:
            random.seed(self.seed)
            np.random.seed(self.seed)
            torch.manual_seed(self.seed)
            if self.use_gpu:
                torch.cuda.manual_seed(self.seed)
                torch.cuda.manual_seed_all(self.seed)

        self.model = get_transformer(self.net_config)
        self.logger.info("model: {:}".format(self.model))
        self.logger.info("model size: {:.3f} MB".format(count_parameters(self.model)))

        if self.opt_config["optimizer"] == "adam":
            self.train_optimizer = optim.Adam(self.model.parameters(), lr=self.opt_config["lr"])
        elif self.opt_config["optimizer"] == "sgd":
            self.train_optimizer = optim.SGD(self.model.parameters(), lr=self.opt_config["lr"])
        else:
            raise NotImplementedError("optimizer {:} is not supported!".format(self.opt_config["optimizer"]))

        self.fitted = False
        self.model.to(self.device)

    @property
    def use_gpu(self):
        return self.device != torch.device("cpu")

    def loss_fn(self, pred, label):
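        """Masked MSE loss: samples whose label is NaN are excluded."""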
        mask = ~torch.isnan(label)
        if self.opt_config["loss"] == "mse":
            return F.mse_loss(pred[mask], label[mask])
        else:
            raise ValueError("unknown loss `{:}`".format(self.opt_config["loss"]))

    def metric_fn(self, pred, label):
        # the metric score : higher is better
        if self.metric == "" or self.metric == "loss":
            return -self.loss_fn(pred, label)
        else:
            raise ValueError("unknown metric `{:}`".format(self.metric))

    def train_or_test_epoch(self, xloader, model, loss_fn, metric_fn, is_train, optimizer=None):
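        """Run one pass over `xloader`; back-propagate only when `is_train` is True.

        Returns the batch-size-weighted average loss and metric score.
        """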
        if is_train:
            model.train()
        else:
            model.eval()
        score_meter, loss_meter = AverageMeter(), AverageMeter()
        for ibatch, (feats, labels) in enumerate(xloader):
            feats = feats.to(self.device, non_blocking=True)
            labels = labels.to(self.device, non_blocking=True)
            # forward the network
            preds = model(feats)
            loss = loss_fn(preds, labels)
            with torch.no_grad():
                score = metric_fn(preds, labels)
            loss_meter.update(loss.item(), feats.size(0))
            score_meter.update(score.item(), feats.size(0))
            # optimize the network
            if is_train and optimizer is not None:
                optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_value_(model.parameters(), 3.0)
                optimizer.step()
        return loss_meter.avg, score_meter.avg

    def fit(
        self,
        dataset: DatasetH,
        save_path: Optional[Text] = None,
    ):
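        """Fit on the train split with early stopping on the valid split.

        A checkpoint (see `save_info` below) is written after every epoch, so an
        interrupted run can resume from `save_path`; the best weights by valid
        score are restored before returning.
        """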
        def _prepare_dataset(df_data):
            return th_data.TensorDataset(
                torch.from_numpy(df_data["feature"].values).float(),
                torch.from_numpy(df_data["label"].values).squeeze().float(),
            )

        def _prepare_loader(dataset, shuffle):
            return th_data.DataLoader(
                dataset,
                batch_size=self.opt_config["batch_size"],
                drop_last=False,
                pin_memory=True,
                num_workers=self.opt_config["num_workers"],
                shuffle=shuffle,
            )

        df_train, df_valid, df_test = dataset.prepare(
            ["train", "valid", "test"],
            col_set=["feature", "label"],
            data_key=DataHandlerLP.DK_L,
        )
        train_dataset, valid_dataset, test_dataset = (
            _prepare_dataset(df_train),
            _prepare_dataset(df_valid),
            _prepare_dataset(df_test),
        )
        train_loader, valid_loader, test_loader = (
            _prepare_loader(train_dataset, True),
            _prepare_loader(valid_dataset, False),
            _prepare_loader(test_dataset, False),
        )

        save_path = get_or_create_path(save_path, return_dir=True)
        self.logger.info("Fit procedure for [{:}] with save path={:}".format(self.__class__.__name__, save_path))

        def _internal_test(ckp_epoch=None, results_dict=None):
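            """Evaluate all three splits; record scores in `results_dict` under `ckp_epoch`."""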
            with torch.no_grad():
                train_loss, train_score = self.train_or_test_epoch(
                    train_loader, self.model, self.loss_fn, self.metric_fn, False, None
                )
                valid_loss, valid_score = self.train_or_test_epoch(
                    valid_loader, self.model, self.loss_fn, self.metric_fn, False, None
                )
                test_loss, test_score = self.train_or_test_epoch(
                    test_loader, self.model, self.loss_fn, self.metric_fn, False, None
                )
                xstr = "train-score={:.6f}, valid-score={:.6f}, test-score={:.6f}".format(
                    train_score, valid_score, test_score
                )
                if ckp_epoch is not None and isinstance(results_dict, dict):
                    results_dict["train"][ckp_epoch] = train_score
                    results_dict["valid"][ckp_epoch] = valid_score
                    results_dict["test"][ckp_epoch] = test_score
                return dict(train=train_score, valid=valid_score, test=test_score), xstr

        # Pre-fetch the potential checkpoints
        ckp_path = os.path.join(save_path, "{:}.pth".format(self.__class__.__name__))
        if os.path.exists(ckp_path):
            # Resume the interrupted training state; the keys mirror `save_info` below.
            ckp_data = torch.load(ckp_path)
            stop_steps, best_score, best_epoch = ckp_data["stop_steps"], ckp_data["best_score"], ckp_data["best_epoch"]
            start_epoch, best_param = ckp_data["start_epoch"], ckp_data["best_param"]
            results_dict = ckp_data["results_dict"]
            self.model.load_state_dict(ckp_data["net_state_dict"])
            self.train_optimizer.load_state_dict(ckp_data["opt_state_dict"])
            self.logger.info("Resume from existing checkpoint: {:}".format(ckp_path))
        else:
            stop_steps, best_score, best_epoch = 0, -np.inf, -1
            start_epoch, best_param = 0, None
            results_dict = dict(train=OrderedDict(), valid=OrderedDict(), test=OrderedDict())
            _, eval_str = _internal_test(-1, results_dict)
            self.logger.info("Training from scratch, metrics@start: {:}".format(eval_str))

        for iepoch in range(start_epoch, self.opt_config["epochs"]):
            self.logger.info(
                "Epoch={:03d}/{:03d} ::==>> Best valid @{:03d} ({:.6f})".format(
                    iepoch, self.opt_config["epochs"], best_epoch, best_score
                )
            )

            train_loss, train_score = self.train_or_test_epoch(
                train_loader, self.model, self.loss_fn, self.metric_fn, True, self.train_optimizer
            )
            self.logger.info("Training :: loss={:.6f}, score={:.6f}".format(train_loss, train_score))

            current_eval_scores, eval_str = _internal_test(iepoch, results_dict)
            self.logger.info("Evaluating :: {:}".format(eval_str))

            if current_eval_scores["valid"] > best_score:
                stop_steps, best_epoch, best_score = 0, iepoch, current_eval_scores["valid"]
                best_param = copy.deepcopy(self.model.state_dict())
            else:
                stop_steps += 1
                if stop_steps >= self.opt_config["early_stop"]:
                    self.logger.info("early stop at {:}-th epoch, where the best is @{:}".format(iepoch, best_epoch))
                    break
            save_info = dict(
                net_config=self.net_config,
                opt_config=self.opt_config,
                net_state_dict=self.model.state_dict(),
                opt_state_dict=self.train_optimizer.state_dict(),
                best_param=best_param,
                stop_steps=stop_steps,
                best_score=best_score,
                best_epoch=best_epoch,
                results_dict=results_dict,
                start_epoch=iepoch + 1,
            )
            torch.save(save_info, ckp_path)

        self.logger.info("The best score: {:.6f} @ {:02d}-th epoch".format(best_score, best_epoch))
        self.model.load_state_dict(best_param)

        if self.use_gpu:
            torch.cuda.empty_cache()
        self.fitted = True

    def predict(self, dataset):
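        """Predict over the test split and return a pd.Series aligned to its index."""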
        if not self.fitted:
            raise ValueError("The model is not fitted yet!")
        x_test = dataset.prepare("test", col_set="feature")
        index = x_test.index

        self.model.eval()
        x_values = x_test.values
        sample_num, batch_size = x_values.shape[0], self.opt_config["batch_size"]
        preds = []

        for begin in range(0, sample_num, batch_size):
            end = min(sample_num, begin + batch_size)
            x_batch = torch.from_numpy(x_values[begin:end]).float().to(self.device)

            with torch.no_grad():
                pred = self.model(x_batch).detach().cpu().numpy()
            preds.append(pred)

        return pd.Series(np.concatenate(preds), index=index)