# autodl-projects/lib/trade_models/quant_transformer.py
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021 #
##################################################
from __future__ import division
from __future__ import print_function

import os, math, random
import copy
from collections import OrderedDict
from functools import partial
from typing import Optional, Text, Union

import numpy as np
import pandas as pd

import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as th_data

from qlib.utils import get_or_create_path
from qlib.log import get_module_logger
from qlib.model.base import Model
from qlib.data.dataset import DatasetH
from qlib.data.dataset.handler import DataHandlerLP

from log_utils import AverageMeter
from utils import count_parameters
from xlayers import super_core
from .transformers import DEFAULT_NET_CONFIG
from .transformers import get_transformer

DEFAULT_OPT_CONFIG = dict(
    epochs=200,
    lr=0.001,
    batch_size=2000,
    early_stop=20,
    loss="mse",
    optimizer="adam",
    num_workers=4,
)
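
# A hedged example (not used below): a config can override a few optimization
# defaults while inheriting the rest; `my_opt_config` is a hypothetical name.
#   my_opt_config = dict(DEFAULT_OPT_CONFIG, lr=0.0005, batch_size=512)
#   model = QuantTransformer(opt_config=my_opt_config)
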

def train_or_test_epoch(
    xloader, model, loss_fn, metric_fn, is_train, optimizer, device
):
    """Run one epoch over `xloader`; return (average loss, average score)."""
    if is_train:
        model.train()
    else:
        model.eval()
    score_meter, loss_meter = AverageMeter(), AverageMeter()
    for ibatch, (feats, labels) in enumerate(xloader):
        feats, labels = feats.to(device), labels.to(device)
        # forward the network
        preds = model(feats)
        loss = loss_fn(preds, labels)
        with torch.no_grad():
            score = metric_fn(preds, labels)
        loss_meter.update(loss.item(), feats.size(0))
        score_meter.update(score.item(), feats.size(0))
        # optimize the network
        if is_train and optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_value_(model.parameters(), 3.0)
            optimizer.step()
    return loss_meter.avg, score_meter.avg

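
# A minimal, self-contained smoke test for `train_or_test_epoch` (a sketch, not
# part of the original module): it drives one training pass of a tiny linear
# model over random tensors. `_demo_train_or_test_epoch` and everything inside
# it are hypothetical names used purely for illustration.
def _demo_train_or_test_epoch():
    model = torch.nn.Linear(16, 1)
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    dataset = th_data.TensorDataset(torch.randn(32, 16), torch.randn(32))
    loader = th_data.DataLoader(dataset, batch_size=8)

    def loss_fn(preds, labels):
        return F.mse_loss(preds.squeeze(-1), labels)

    def metric_fn(preds, labels):
        # higher is better, mirroring QuantTransformer.metric_fn
        return -loss_fn(preds, labels)

    loss, score = train_or_test_epoch(
        loader, model, loss_fn, metric_fn, True, optimizer, torch.device("cpu")
    )
    print("demo :: loss={:.4f}, score={:.4f}".format(loss, score))
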

class QuantTransformer(Model):
    """Transformer-based Quant Model"""

    def __init__(
        self, net_config=None, opt_config=None, metric="", GPU=0, seed=None, **kwargs
    ):
        # Set logger.
        self.logger = get_module_logger("QuantTransformer")
        self.logger.info("QuantTransformer PyTorch version...")

        # set hyper-parameters.
        self.net_config = net_config or DEFAULT_NET_CONFIG
        self.opt_config = opt_config or DEFAULT_OPT_CONFIG
        self.metric = metric
        self.device = torch.device(
            "cuda:{:}".format(GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu"
        )
        self.seed = seed
        self.logger.info(
            "Transformer parameters setting:"
            "\nnet_config : {:}"
            "\nopt_config : {:}"
            "\nmetric     : {:}"
            "\ndevice     : {:}"
            "\nseed       : {:}".format(
                self.net_config,
                self.opt_config,
                self.metric,
                self.device,
                self.seed,
            )
        )
        if self.seed is not None:
            random.seed(self.seed)
            np.random.seed(self.seed)
            torch.manual_seed(self.seed)
            if self.use_gpu:
                torch.cuda.manual_seed(self.seed)
                torch.cuda.manual_seed_all(self.seed)

        self.model = get_transformer(self.net_config)
        self.model.set_super_run_type(super_core.SuperRunMode.FullModel)
        self.logger.info("model: {:}".format(self.model))
        self.logger.info("model size: {:.3f} MB".format(count_parameters(self.model)))
        if self.opt_config["optimizer"] == "adam":
            self.train_optimizer = optim.Adam(
                self.model.parameters(), lr=self.opt_config["lr"]
            )
        elif self.opt_config["optimizer"] == "sgd":
            self.train_optimizer = optim.SGD(
                self.model.parameters(), lr=self.opt_config["lr"]
            )
        else:
            raise NotImplementedError(
                "optimizer {:} is not supported!".format(self.opt_config["optimizer"])
            )

        self.fitted = False
        self.model.to(self.device)

    @property
    def use_gpu(self):
        return self.device != torch.device("cpu")

    def to(self, device):
        if device is None:
            device = "cpu"
        self.device = device
        self.model.to(self.device)
        # move the optimizer state tensors as well
        for param in self.train_optimizer.state.values():
            # Not sure there are any global tensors in the state dict
            if isinstance(param, torch.Tensor):
                param.data = param.data.to(device)
                if param._grad is not None:
                    param._grad.data = param._grad.data.to(device)
            elif isinstance(param, dict):
                for subparam in param.values():
                    if isinstance(subparam, torch.Tensor):
                        subparam.data = subparam.data.to(device)
                        if subparam._grad is not None:
                            subparam._grad.data = subparam._grad.data.to(device)

    def loss_fn(self, pred, label):
        mask = ~torch.isnan(label)
        if self.opt_config["loss"] == "mse":
            return F.mse_loss(pred[mask], label[mask])
        else:
            raise ValueError("unknown loss `{:}`".format(self.opt_config["loss"]))

    def metric_fn(self, pred, label):
        # the metric score : higher is better
        if self.metric == "" or self.metric == "loss":
            return -self.loss_fn(pred, label)
        else:
            raise ValueError("unknown metric `{:}`".format(self.metric))

    def fit(
        self,
        dataset: DatasetH,
        save_dir: Optional[Text] = None,
    ):
        def _prepare_dataset(df_data):
            return th_data.TensorDataset(
                torch.from_numpy(df_data["feature"].values).float(),
                torch.from_numpy(df_data["label"].values).squeeze().float(),
            )

        def _prepare_loader(dataset, shuffle):
            return th_data.DataLoader(
                dataset,
                batch_size=self.opt_config["batch_size"],
                drop_last=False,
                pin_memory=True,
                num_workers=self.opt_config["num_workers"],
                shuffle=shuffle,
            )

        df_train, df_valid, df_test = dataset.prepare(
            ["train", "valid", "test"],
            col_set=["feature", "label"],
            data_key=DataHandlerLP.DK_L,
        )
        train_dataset, valid_dataset, test_dataset = (
            _prepare_dataset(df_train),
            _prepare_dataset(df_valid),
            _prepare_dataset(df_test),
        )
        train_loader, valid_loader, test_loader = (
            _prepare_loader(train_dataset, True),
            _prepare_loader(valid_dataset, False),
            _prepare_loader(test_dataset, False),
        )

        save_dir = get_or_create_path(save_dir, return_dir=True)
        self.logger.info(
            "Fit procedure for [{:}] with save path={:}".format(
                self.__class__.__name__, save_dir
            )
        )

        def _internal_test(ckp_epoch=None, results_dict=None):
            with torch.no_grad():
                shared_kwargs = {
                    "model": self.model,
                    "loss_fn": self.loss_fn,
                    "metric_fn": self.metric_fn,
                    "is_train": False,
                    "optimizer": None,
                    "device": self.device,
                }
                train_loss, train_score = train_or_test_epoch(
                    train_loader, **shared_kwargs
                )
                valid_loss, valid_score = train_or_test_epoch(
                    valid_loader, **shared_kwargs
                )
                test_loss, test_score = train_or_test_epoch(
                    test_loader, **shared_kwargs
                )
                xstr = (
                    "train-score={:.6f}, valid-score={:.6f}, test-score={:.6f}".format(
                        train_score, valid_score, test_score
                    )
                )
                if ckp_epoch is not None and isinstance(results_dict, dict):
                    results_dict["train"][ckp_epoch] = train_score
                    results_dict["valid"][ckp_epoch] = valid_score
                    results_dict["test"][ckp_epoch] = test_score
                return dict(train=train_score, valid=valid_score, test=test_score), xstr

        # Pre-fetch the potential checkpoints
        ckp_path = os.path.join(save_dir, "{:}.pth".format(self.__class__.__name__))
        if os.path.exists(ckp_path):
            ckp_data = torch.load(ckp_path, map_location=self.device)
            stop_steps, best_score, best_epoch = (
                ckp_data["stop_steps"],
                ckp_data["best_score"],
                ckp_data["best_epoch"],
            )
            start_epoch, best_param = ckp_data["start_epoch"], ckp_data["best_param"]
            results_dict = ckp_data["results_dict"]
            self.model.load_state_dict(ckp_data["net_state_dict"])
            self.train_optimizer.load_state_dict(ckp_data["opt_state_dict"])
            self.logger.info("Resume from existing checkpoint: {:}".format(ckp_path))
        else:
            stop_steps, best_score, best_epoch = 0, -np.inf, -1
            start_epoch, best_param = 0, None
            results_dict = dict(
                train=OrderedDict(), valid=OrderedDict(), test=OrderedDict()
            )
            _, eval_str = _internal_test(-1, results_dict)
            self.logger.info(
                "Training from scratch, metrics@start: {:}".format(eval_str)
            )

        for iepoch in range(start_epoch, self.opt_config["epochs"]):
            self.logger.info(
                "Epoch={:03d}/{:03d} ::==>> Best valid @{:03d} ({:.6f})".format(
                    iepoch, self.opt_config["epochs"], best_epoch, best_score
                )
            )
            train_loss, train_score = train_or_test_epoch(
                train_loader,
                self.model,
                self.loss_fn,
                self.metric_fn,
                True,
                self.train_optimizer,
                self.device,
            )
            self.logger.info(
                "Training :: loss={:.6f}, score={:.6f}".format(train_loss, train_score)
            )

            current_eval_scores, eval_str = _internal_test(iepoch, results_dict)
            self.logger.info("Evaluating :: {:}".format(eval_str))

            if current_eval_scores["valid"] > best_score:
                stop_steps, best_epoch, best_score = (
                    0,
                    iepoch,
                    current_eval_scores["valid"],
                )
                best_param = copy.deepcopy(self.model.state_dict())
            else:
                stop_steps += 1
                if stop_steps >= self.opt_config["early_stop"]:
                    self.logger.info(
                        "early stop at {:}-th epoch, where the best is @{:}".format(
                            iepoch, best_epoch
                        )
                    )
                    break
            save_info = dict(
                net_config=self.net_config,
                opt_config=self.opt_config,
                net_state_dict=self.model.state_dict(),
                opt_state_dict=self.train_optimizer.state_dict(),
                best_param=best_param,
                stop_steps=stop_steps,
                best_score=best_score,
                best_epoch=best_epoch,
                results_dict=results_dict,
                start_epoch=iepoch + 1,
            )
            torch.save(save_info, ckp_path)
        self.logger.info(
            "The best score: {:.6f} @ {:02d}-th epoch".format(best_score, best_epoch)
        )
        self.model.load_state_dict(best_param)
        _, eval_str = _internal_test("final", results_dict)
        self.logger.info("Reload the best parameter :: {:}".format(eval_str))
        if self.use_gpu:
            with torch.cuda.device(self.device):
                torch.cuda.empty_cache()
        self.fitted = True

    def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
        if not self.fitted:
            raise ValueError("The model is not fitted yet!")
        x_test = dataset.prepare(
            segment, col_set="feature", data_key=DataHandlerLP.DK_I
        )
        index = x_test.index

        with torch.no_grad():
            self.model.eval()
            x_values = x_test.values
            sample_num, batch_size = x_values.shape[0], self.opt_config["batch_size"]
            preds = []
            # run inference in mini-batches to bound the memory footprint
            for begin in range(0, sample_num, batch_size):
                end = min(sample_num, begin + batch_size)
                x_batch = torch.from_numpy(x_values[begin:end]).float().to(self.device)
                pred = self.model(x_batch).detach().cpu().numpy()
                preds.append(pred)
        return pd.Series(np.concatenate(preds), index=index)
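

# A hedged end-to-end usage sketch, guarded so nothing runs on import. The
# fit/predict lines are commented out because they require an initialized qlib
# workspace and a real DatasetH, both outside the scope of this file.
if __name__ == "__main__":
    # Exercise the synthetic smoke test defined near the top of this module.
    _demo_train_or_test_epoch()
    # With qlib set up, usage would look roughly like:
    #   model = QuantTransformer(GPU=0, seed=42)
    #   model.fit(dataset, save_dir="./checkpoints")
    #   predictions = model.predict(dataset, segment="test")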