When I try to run hyperas on my keras code, I seem to run into an IndexError somehow related to parsing strings.
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform,loguniform
def data():
import math
import platform
import random
import sys
import time
import numpy as np
import scipy.io
import platform
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM,GRU
import numpy as np
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform,loguniform
#from tqdm import *
WINDOW_SIZE=1
BATCH_SIZE=500
make_windows=lambda data, window_size: np.stack([ data[row:row+WINDOW_SIZE,:] for row in range(WINDOW_SIZE,data.shape[0]-WINDOW_SIZE) if row%WINDOW_SIZE==0])
one_hot_row = lambda place,leng: np.array([1 if i==place else 0 for i in range(leng)])
one_hot_df = lambda df, leng: np.array([one_hot_row(row, leng) for row in df])
precision = lambda c_m: c_m.astype(float).diagonal() / np.sum(c_m, 1)
recall = lambda c_m: c_m.astype(float).diagonal() / np.sum(c_m, 0)
f1scores = lambda c_m: 2 * precision(c_m) * recall(c_m) / (precision(c_m) + recall(c_m))
meanf1 = lambda c_m: np.mean(np.nan_to_num(f1scores(c_m)))
OPP_mat_file = scipy.io.loadmat('opp2IMUsOnly.mat')
opp_training_data = OPP_mat_file['trainingData'].transpose()
opp_training_labels = OPP_mat_file['trainingLabels']#.transpose()
opp_val_data = OPP_mat_file['testingData'].transpose()
opp_val_labels = OPP_mat_file['testingLabels']#.transpose()
target_data = make_windows(opp_training_data, WINDOW_SIZE)#pamap_2_data# pamap_2_data[0:(pamap_2_data.shape[0]/2),:]
target_labels = make_windows(one_hot_df(opp_training_labels-1,18), WINDOW_SIZE) #pamap_2_labels-2 # pamap_2_labels[0:(pamap_2_data.shape[0]/2),:]
val_data = make_windows(opp_val_data, WINDOW_SIZE) #pamap_2_data# pamap_2_data[(pamap_2_data.shape[0]/2):,:]
val_labels = make_windows(one_hot_df(opp_val_labels-1,18), WINDOW_SIZE)
return target_data, target_labels, val_data, val_labels
def model(target_data, target_labels, val_data, val_labels):
WINDOW_SIZE=1
BATCH_SIZE=500
DROPOUT=0.5
LAYER_SIZE=1,#{{choice([1,2,3])}}
#LEARNING_RATE=
#LEARNING_RATE_DECAY =
RHO=0.9
N_LAYERS=2
model = Sequential()
model.add(LSTM(300, batch_input_shape=(BATCH_SIZE, WINDOW_SIZE,79), stateful=True,init='glorot_uniform', dropout_W=DROPOUT))
'''for i in range(N_LAYERS-1):
model.add(LSTM(150, stateful=True,init='glorot_uniform', dropout_W=0.5,W_regularizer = keras.regularizers.l1(1) ))#model.add())
'''#model.add(LSTM(output_dim=18, activation='sigmoid', inner_activation='hard_sigmoid', input_shape=(79,)))
model.add(Dropout(DROPOUT))
model.add(Dense(18))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.RMSprop(lr={{loguniform(-7,-2)}},rho=RHO, decay={{loguniform(-7,-2)}}, epsilon=1e-05),#lr=0.010494504630565, rho=0.9,
metrics=['categorical_accuracy'])
model.fit(target_data[:-(target_data.shape[0]%BATCH_SIZE),:], target_labels[:-(target_labels.shape[0]%BATCH_SIZE),0,:], batch_size=BATCH_SIZE, nb_epoch=50,verbose=2)
predictions = model.predict(batch_size=BATCH_SIZE, x=val_data[:-(val_data.shape[0]%BATCH_SIZE),:])
actuals = np.argmax(one_hot_df(opp_val_labels[1:-1-(val_data.shape[0]%BATCH_SIZE)]-1, 18), axis=1)
test_conf_matrix = sklearn.metrics.confusion_matrix(
y_pred=np.argmax(predictions, axis=1),
y_true=actuals, labels=list(range(0, 18)))
trial_mean_f1_score = meanf1(test_conf_matrix)
outputDict = {'loss': -trial_mean_f1_score, 'status': STATUS_OK, 'model': model, 'eval_time': time.time()}
return outputDict
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
Using TensorFlow backend.
Traceback (most recent call last):
File "/Users/shanework/Dropbox/tfreconstlstmexps/9sept modified slightly and still works/using_hyperas.py", line 82, in <module>
trials=Trials())
File "/Users/shanework/miniconda2/lib/python2.7/site-packages/hyperas/optim.py", line 31, in minimize
best_run = base_minimizer(model, data, algo, max_evals, trials, rseed)
File "/Users/shanework/miniconda2/lib/python2.7/site-packages/hyperas/optim.py", line 92, in base_minimizer
model_str = get_hyperopt_model_string(model, data)
File "/Users/shanework/miniconda2/lib/python2.7/site-packages/hyperas/optim.py", line 74, in get_hyperopt_model_string
parts = hyperparameter_names(model_string)
File "/Users/shanework/miniconda2/lib/python2.7/site-packages/hyperas/optim.py", line 154, in hyperparameter_names
parts.append(parts[-1])
IndexError: list index out of range
Thank-you.