Temp / 0.5

commit cc28e1589e, parent 2fa358fdf6
changed paths: exps/trading, lib
@@ -65,6 +65,10 @@ def update_market(config, market):
 def run_exp(task_config, dataset, experiment_name, recorder_name, uri):
 
     # model initiation
+    print('')
+    print('[{:}] - [{:}]: {:}'.format(experiment_name, recorder_name, uri))
+    print('dataset={:}'.format(dataset))
+
     model = init_instance_by_config(task_config["model"])
 
     # start exp
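For context on the hunk above (an experiment runner under exps/trading): init_instance_by_config is qlib's factory helper, which builds an object from a class / module_path / kwargs dict. A minimal sketch of the task_config["model"] entry it expects; the concrete class name, module path, and kwargs below are illustrative placeholders, not values from this commit:

# Sketch of the config dict consumed by qlib.utils.init_instance_by_config.
# The class, module_path, and kwargs here are illustrative placeholders.
from qlib.utils import init_instance_by_config

task_config = {
    "model": {
        "class": "QuantTransformer",         # assumed model class name
        "module_path": "quant_transformer",  # assumed module that defines it
        "kwargs": {"loss": "mse", "metric": "loss"},
    }
}

model = init_instance_by_config(task_config["model"])  # instantiates the model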
@@ -1,11 +1,15 @@
+#####################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.02 #
+#####################################################
 import torch
 import torch.nn as nn
 import math
 
 class PositionalEncoder(nn.Module):
     # Attention Is All You Need: https://arxiv.org/pdf/1706.03762.pdf
+    # https://github.com/pytorch/examples/blob/master/word_language_model/model.py#L65
 
-    def __init__(self, d_model, max_seq_len):
+    def __init__(self, d_model, max_seq_len, dropout=0.1):
         super(PositionalEncoder, self).__init__()
         self.d_model = d_model
         # create constant 'pe' matrix with values dependent on
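The hunk stops at the comment introducing the constant 'pe' matrix; its construction lies outside the diff. As a sketch only: the standard sinusoidal table from the two linked references, plus the nn.Dropout member that the new dropout argument presumably feeds. Both are assumptions about unshown lines, not code from this commit, hence the hedged class name:

import math
import torch
import torch.nn as nn

# Sketch of how the unshown remainder of __init__ presumably looks: the
# sinusoidal table from "Attention Is All You Need" registered as a buffer,
# plus the nn.Dropout implied by the new `dropout` argument. Assumption,
# not code from this commit.
class PositionalEncoderSketch(nn.Module):
    def __init__(self, d_model, max_seq_len, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_seq_len, d_model)
        position = torch.arange(0, max_seq_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        self.register_buffer("pe", pe.unsqueeze(0))   # (1, max_seq_len, d_model)

    def forward(self, x):
        batch, seq, fdim = x.shape[:3]
        return self.dropout(x + self.pe[:, :seq, :fdim])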
@@ -26,4 +30,5 @@ class PositionalEncoder(nn.Module):
     def forward(self, x):
         batch, seq, fdim = x.shape[:3]
         embeddings = self.pe[:, :seq, :fdim]
-        return x + embeddings
+        return self.dropout(x + embeddings)
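A quick shape check of the forward pass, using the sketch class above; the batch-first input layout is inferred from the slicing in forward:

# Usage sketch: batch-first input of shape (batch, seq, d_model).
encoder = PositionalEncoderSketch(d_model=64, max_seq_len=512, dropout=0.1)
x = torch.randn(32, 100, 64)   # (batch, seq, feature)
out = encoder(x)               # positions added, then dropout applied
assert out.shape == (32, 100, 64)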
@@ -9,7 +9,6 @@ import numpy as np
 import pandas as pd
 import copy
 from functools import partial
-from sklearn.metrics import roc_auc_score, mean_squared_error
 from typing import Optional
 import logging
 
@@ -23,10 +22,11 @@ from qlib.log import get_module_logger, TimeInspector
 
 import torch
 import torch.nn as nn
+import torch.nn.functional as F
 import torch.optim as optim
 
 import layers as xlayers
-from utils import count_parameters_in_MB
+from utils import count_parameters
 
 from qlib.model.base import Model
 from qlib.data.dataset import DatasetH
@@ -137,9 +137,9 @@ class QuantTransformer(Model):
         mask = ~torch.isnan(label)
 
         if self.loss == "mse":
             return self.mse(pred[mask], label[mask])
         else:
-            raise ValueError("unknown loss `%s`" % self.loss)
+            raise ValueError("unknown loss `{:}`".format(self.loss))
 
     def metric_fn(self, pred, label):
 
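The NaN mask in loss_fn keeps unlabeled rows out of the objective. A standalone sketch of that pattern; nn.MSELoss() stands in for the class's self.mse, whose definition is an assumption since it is not shown in this diff:

import torch
import torch.nn as nn

# Standalone sketch of the NaN-masking in loss_fn. nn.MSELoss() stands in
# for self.mse, which is assumed, not shown in this diff.
mse = nn.MSELoss()
pred = torch.tensor([0.1, 0.4, 0.3])
label = torch.tensor([0.2, float("nan"), 0.5])

mask = ~torch.isnan(label)            # True where a label exists
loss = mse(pred[mask], label[mask])   # MSE over the two labeled samples only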
@@ -147,8 +147,8 @@ class QuantTransformer(Model):
 
         if self.metric == "" or self.metric == "loss":
             return -self.loss_fn(pred[mask], label[mask])
         else:
-            raise ValueError("unknown metric `%s`" % self.metric)
+            raise ValueError("unknown metric `{:}`".format(self.metric))
 
     def train_epoch(self, x_train, y_train):
 
@@ -4,10 +4,23 @@ import numpy as np
 
 
 def count_parameters_in_MB(model):
-    if isinstance(model, nn.Module):
-        return np.sum(np.prod(v.size()) for v in model.parameters())/1e6
+    return count_parameters(model, "mb")
+
+
+def count_parameters(model_or_parameters, unit="mb"):
+    if isinstance(model_or_parameters, nn.Module):
+        counts = np.sum(np.prod(v.size()) for v in model_or_parameters.parameters())
     else:
-        return np.sum(np.prod(v.size()) for v in model)/1e6
+        counts = np.sum(np.prod(v.size()) for v in model_or_parameters)
+    if unit.lower() == "mb":
+        counts /= 1e6
+    elif unit.lower() == "kb":
+        counts /= 1e3
+    elif unit.lower() == "gb":
+        counts /= 1e9
+    elif unit is not None:
+        raise ValueError("Unknown unit: {:}".format(unit))
+    return counts
 
 
 def get_model_infos(model, shape):
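A quick sanity check of the refactored helper; the tiny model below is illustrative, and the import assumes utils is on the path exactly as in the model file's hunk above:

import torch.nn as nn
from utils import count_parameters, count_parameters_in_MB

# Illustrative check: nn.Linear(10, 20) has 10*20 weights + 20 biases = 220
# parameters.
model = nn.Linear(10, 20)
print(count_parameters(model, "mb"))               # 220 / 1e6 = 0.00022
print(count_parameters(model.parameters(), "kb"))  # 220 / 1e3 = 0.22 (iterable path)
print(count_parameters_in_MB(model))               # same as unit="mb"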