import argparse
import json
import os
import pathlib
import random
import traceback

import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

from exp.exp_main import Exp_Main


class moving_avg(nn.Module):
    """
    Moving average block to highlight the trend of time series
    """
    def __init__(self, kernel_size, stride):
        super(moving_avg, self).__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x):
        # pad both ends of the time series by repeating the boundary values
        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        x = torch.cat([front, x, end], dim=1)
        x = self.avg(x.permute(0, 2, 1))
        x = x.permute(0, 2, 1)
        return x


class series_decomp(nn.Module):
    """
    Series decomposition block
    """
    def __init__(self, kernel_size):
        super(series_decomp, self).__init__()
        self.moving_avg = moving_avg(kernel_size, stride=1)

    def forward(self, x):
        moving_mean = self.moving_avg(x)
        res = x - moving_mean
        return res, moving_mean


class Model(nn.Module):
    """
    Decomposition-Linear
    """
    def __init__(self, configs):
        super(Model, self).__init__()
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len

        # Decomposition kernel size
        kernel_size = 25
        self.decomposition = series_decomp(kernel_size)
        self.individual = configs.individual
        self.channels = configs.enc_in

        if self.individual:
            self.Linear_Seasonal = nn.ModuleList()
            self.Linear_Trend = nn.ModuleList()
            for i in range(self.channels):
                self.Linear_Seasonal.append(nn.Linear(self.seq_len, self.pred_len))
                self.Linear_Trend.append(nn.Linear(self.seq_len, self.pred_len))
        else:
            self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len)
            self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len)

    def forward(self, x):
        # x: [Batch, Input length, Channel]
        seasonal_init, trend_init = self.decomposition(x)
        seasonal_init, trend_init = seasonal_init.permute(0, 2, 1), trend_init.permute(0, 2, 1)
        if self.individual:
            seasonal_output = torch.zeros([seasonal_init.size(0), seasonal_init.size(1), self.pred_len],
                                          dtype=seasonal_init.dtype).to(seasonal_init.device)
            trend_output = torch.zeros([trend_init.size(0), trend_init.size(1), self.pred_len],
                                       dtype=trend_init.dtype).to(trend_init.device)
            for i in range(self.channels):
                seasonal_output[:, i, :] = self.Linear_Seasonal[i](seasonal_init[:, i, :])
                trend_output[:, i, :] = self.Linear_Trend[i](trend_init[:, i, :])
        else:
            seasonal_output = self.Linear_Seasonal(seasonal_init)
            trend_output = self.Linear_Trend(trend_init)

        x = seasonal_output + trend_output
        return x.permute(0, 2, 1)  # to [Batch, Output length, Channel]
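
# ---------------------------------------------------------------------------
# Minimal shape-check sketch for the decomposition model above. Illustrative
# only: it is never called by the training entry point below, and
# `SimpleNamespace` merely stands in for the parsed argparse namespace
# (the field values here are assumptions, not project defaults).
def _decomposition_shape_check():
    from types import SimpleNamespace
    configs = SimpleNamespace(seq_len=96, pred_len=24, individual=False, enc_in=7)
    model = Model(configs)
    x = torch.randn(4, configs.seq_len, configs.enc_in)  # [Batch, Input length, Channel]
    # Decomposition preserves shape: residual + trend reconstruct the input
    res, trend = series_decomp(25)(x)
    assert res.shape == trend.shape == x.shape
    # The model maps the input length to the prediction length per channel
    y = model(x)
    assert y.shape == (4, configs.pred_len, configs.enc_in)
# ---------------------------------------------------------------------------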


if __name__ == '__main__':
    fix_seed = 2021
    random.seed(fix_seed)
    torch.manual_seed(fix_seed)
    np.random.seed(fix_seed)

    parser = argparse.ArgumentParser(description='Autoformer & Transformer family for Time Series Forecasting')

    parser.add_argument('--out_dir', type=str, default='run_0')

    # basic config
    parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
    parser.add_argument('--train_only', action='store_true', default=False,
                        help='perform training on full input dataset without validation and testing')

    # data loader
    parser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type')
    parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')
    parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
    parser.add_argument('--features', type=str, default='M',
                        help='forecasting task, options: [M, S, MS]; M: multivariate predict multivariate, '
                             'S: univariate predict univariate, MS: multivariate predict univariate')
    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
    parser.add_argument('--freq', type=str, default='h',
                        help='freq for time features encoding, options: [s:secondly, t:minutely, h:hourly, '
                             'd:daily, b:business days, w:weekly, m:monthly]; you can also use a more detailed '
                             'freq like 15min or 3h')
    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')

    # forecasting task
    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
    parser.add_argument('--label_len', type=int, default=48, help='start token length')
    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')

    # DLinear
    parser.add_argument('--individual', action='store_true', default=False,
                        help='DLinear: a linear layer for each variate (channel) individually')

    # Formers
    parser.add_argument('--embed_type', type=int, default=0,
                        help='0: default; 1: value + temporal + positional embedding; '
                             '2: value + temporal embedding; 3: value + positional embedding; 4: value embedding')
    parser.add_argument('--enc_in', type=int, default=7,
                        help='encoder input size; for DLinear with --individual, the number of channels')
    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
    parser.add_argument('--c_out', type=int, default=7, help='output size')
    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
    parser.add_argument('--factor', type=int, default=1, help='attn factor')
    parser.add_argument('--distil', action='store_false', default=True,
                        help='whether to use distilling in encoder; passing this flag disables distilling')
    parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
    parser.add_argument('--embed', type=str, default='timeF',
                        help='time features encoding, options: [timeF, fixed, learned]')
    parser.add_argument('--activation', type=str, default='gelu', help='activation')
    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in encoder')
    parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')

    # optimization
    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
    parser.add_argument('--itr', type=int, default=2, help='experiment times')
    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
    parser.add_argument('--des', type=str, default='test', help='exp description')
    parser.add_argument('--loss', type=str, default='mse', help='loss function')
    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
    parser.add_argument('--use_amp', action='store_true', default=False, help='use automatic mixed precision training')

    # GPU
    parser.add_argument('--use_gpu', type=bool, default=True,
                        help='use gpu (note: argparse parses any non-empty string as True; '
                             'CUDA availability is re-checked at runtime)')
    parser.add_argument('--gpu', type=int, default=0, help='gpu')
    parser.add_argument('--use_multi_gpu', action='store_true', default=False, help='use multiple gpus')
    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multiple gpus')
    parser.add_argument('--test_flop', action='store_true', default=False, help='see utils/tools for usage')

    args = parser.parse_args()
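
    # Example invocation (illustrative; the script file name and data paths
    # below are assumptions, not fixed by this code):
    #   python run.py --is_training 1 --data ETTh1 --root_path ./data/ETT/ \
    #       --data_path ETTh1.csv --features M --seq_len 96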

    try:
        log_dir = os.path.join(args.out_dir, 'logs')
        pathlib.Path(log_dir).mkdir(parents=True, exist_ok=True)
        writer = SummaryWriter(log_dir)

        args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False

        if args.use_gpu and args.use_multi_gpu:
            args.devices = args.devices.replace(' ', '')
            device_ids = args.devices.split(',')
            args.device_ids = [int(id_) for id_ in device_ids]
            args.gpu = args.device_ids[0]

        print('Args in experiment:')
        print(args)

        mse, mae = [], []
        pred_lens = [96, 192, 336, 720] if args.data_path != 'illness.csv' else [24, 36, 48, 60]
        for pred_len in pred_lens:
            args.pred_len = pred_len
            model = Model(args)
            Exp = Exp_Main
            setting = '{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}'.format(
                args.data,
                args.features,
                args.seq_len,
                args.label_len,
                pred_len,
                args.d_model,
                args.n_heads,
                args.e_layers,
                args.d_layers,
                args.d_ff,
                args.factor,
                args.embed,
                args.distil,
                args.des)

            exp = Exp(args, model)  # set experiments
            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
            exp.train(setting, writer)

            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
            single_mae, single_mse = exp.test(setting)
            print('mse:{}, mae:{}'.format(single_mse, single_mae))
            mae.append(single_mae)
            mse.append(single_mse)
            torch.cuda.empty_cache()

        writer.close()  # flush pending TensorBoard events

        mean_mae = sum(mae) / len(mae)
        mean_mse = sum(mse) / len(mse)
        final_infos = {
            args.data: {
                "means": {
                    "mae": mean_mae,
                    "mse": mean_mse,
                }
            }
        }
        pathlib.Path(args.out_dir).mkdir(parents=True, exist_ok=True)
        # with open(os.path.join(args.out_dir, f"final_info_{args.data}.json"), "w") as f:
        with open(os.path.join(args.out_dir, "final_info.json"), "w") as f:
            json.dump(final_infos, f)
    except Exception:
        print("Original error in subprocess:", flush=True)
        with open(os.path.join(args.out_dir, "traceback.log"), "w") as f:
            traceback.print_exc(file=f)
        raise
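
# A successful run writes <out_dir>/final_info.json containing the MAE/MSE
# averaged over the four prediction lengths, shaped as below (the metric
# values shown are illustrative, not real results):
#   {"ETTh1": {"means": {"mae": 0.45, "mse": 0.43}}}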