# Hyper-parameter configuration for the TFT model.
# NOTE: several key spellings ('categoical', 'vailid') are intentional typos —
# they must match the attribute names read inside tft_model.py (the traceback
# shows `self.time_varying_categoical_variables`), so do not "fix" them here.
#
# NOTE(review): the IndexError raised inside torch.embedding usually means an
# input category index is >= the embedding vocab size — presumably some raw
# categorical value in the data is >= 369; verify against the dataset rather
# than tuning seq_length.
config = {
    'static_variables': len(static_cols),
    'time_varying_categoical_variables': 1,
    'time_varying_real_variables_encoder': 3,  # was 4 in an earlier run
    'time_varying_real_variables_decoder': 2,  # was 3 in an earlier run
    'num_masked_series': 1,
    'static_embedding_vocab_sizes': [369],
    'time_varying_embedding_vocab_sizes': [369],
    'embedding_dim': 8,
    'lstm_hidden_dimension': 160,
    'lstm_layers': 1,
    'dropout': 0.05,
    'device': 'cpu',
    'batch_size': 64,
    'encode_length': 168,  # split point between encoder and decoder windows
    'attn_heads': 4,
    'num_quantiles': 3,
    'vailid_quantiles': [0.1, 0.5, 0.9],
}
# Temporal Fusion Transformer model (excerpt only — the paste has lost
# indentation and most of __init__ is elided by the dotted placeholder below).
class TFT(nn.Module):
def __init__(self, config):
super(TFT, self).__init__()
........
# assumes the caller added a 'seq_length' entry to config before construction
# (it is not set in the config block above) — TODO confirm
self.seq_length = config['seq_length']
I've tried seq_length values from 0 to 1000, but I get the error below for every value.
IndexError Traceback (most recent call last)
<ipython-input-280-9bd3ca615a58> in <module>
1 output,encoder_output, decoder_output, \
2 attn,attn_output_weights, \
----> 3 static_embedding, embeddings_encoder, embeddings_decoder = model.forward(batch)
~\Downloads\tft_model.py in forward(self, x)
346 ##Embedding and variable selection
347 static_embedding = torch.cat(embedding_vectors, dim=1)
--> 348 embeddings_encoder = self.apply_embedding(x['inputs'][:,:self.encode_length,:].float().to(self.device), static_embedding, apply_masking=False)
349 embeddings_decoder = self.apply_embedding(x['inputs'][:,self.encode_length:,:].float().to(self.device), static_embedding, apply_masking=True)
350 embeddings_encoder, encoder_sparse_weights = self.encoder_variable_selection(embeddings_encoder[:,:,:-(self.embedding_dim*self.static_variables)],embeddings_encoder[:,:,-(self.embedding_dim*self.static_variables):])
~\Downloads\tft_model.py in apply_embedding(self, x, static_embedding, apply_masking)
299 time_varying_categoical_vectors = []
300 for i in range(self.time_varying_categoical_variables):
--> 301 emb = self.time_varying_embedding_layers[i](x[:, :,self.time_varying_real_variables_encoder+i].view(x.size(0), -1, 1).long())
302 time_varying_categoical_vectors.append(emb)
303 time_varying_categoical_embedding = torch.cat(time_varying_categoical_vectors, dim=2)
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
~\Downloads\tft_model.py in forward(self, x)
48 x_reshape = x.contiguous().view(-1, x.size(-1)) # (samples * timesteps, input_size)
49
---> 50 y = self.module(x_reshape)
51
52 # We have to reshape Y
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
156
157 def forward(self, input: Tensor) -> Tensor:
--> 158 return F.embedding(
159 input, self.weight, self.padding_idx, self.max_norm,
160 self.norm_type, self.scale_grad_by_freq, self.sparse)
C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
2197 # remove once script supports set_grad_enabled
2198 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 2199 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
2200
2201
IndexError: index out of range in self