In tensorflow, I use like the following, but I don't know why it doesn't work.
history=NN.fit(x=train_x,y=Y_train,epochs=500,validation_split=0.1,batch_size=128,callbacks=[early_stopping])
The bug report is the following:
Epoch 1/500
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
[<ipython-input-68-b504eaeb395a>](https://localhost:8080/#) in <module>()
----> 1 history=NN.fit(x=train_x,y=Y_train,epochs=500,validation_split=0.1,batch_size=128,callbacks=[early_stopping])
1 frames
[/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py](https://localhost:8080/#) in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
NotImplementedError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 863, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File "/usr/local/lib/python3.7/dist-packages/keras/optimizer_v1.py", line 792, in minimize
self.apply_gradients(grads_and_vars)
File "/usr/local/lib/python3.7/dist-packages/keras/optimizer_v1.py", line 795, in apply_gradients
self.optimizer.apply_gradients(grads_and_vars, global_step=self.iterations)
NotImplementedError:
I insert the AMSGrad optimizer in the compile block.
# We apply a fairly simple MLP architecture
def build_network():
models=[]
model=Sequential()
model.add(Dense(64,input_dim=122))
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(.15))
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(.15))
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(.15))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer=AMSGrad(learning_rate=0.015,beta1=0.9, beta2=0.99, epsilon=1e-8),metrics=['accuracy'])
return model