I've trained a DenseNet model on an Ubuntu server with a GPU. I've copied the experiment over and am trying to run inference on macOS. With both the PyTorch and Gluon backends I receive this error.
`Mxnet Version: 1.5.0
Model Details
Loading model - ./workspace/fashion/exp6/output/models/final-symbol.json
/usr/local/lib/python3.7/site-packages/pylg/pylg.py:425: RuntimeWarning: MXNetError RAISED
warnings.warn(core_msg, RuntimeWarning)
Traceback (most recent call last):
File "infer_gluon.py", line 9, in <module>
gtf.Prototype("fashion", "exp6", eval_infer=True);
File "../monk_v1/monk/system/imports.py", line 525, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/gluon_prototype.py", line 25, in Prototype
self.set_system_experiment(experiment_name, eval_infer=eval_infer, resume_train=resume_train, copy_from=copy_from, summary=summary);
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/system/base_class.py", line 82, in set_system_experiment
self.set_system_state_eval_infer();
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/gluon/finetune/level_5_state_base.py", line 31, in set_system_state_eval_infer
self.set_model_final();
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/gluon/finetune/level_2_model_base.py", line 50, in set_model_final
self.system_dict = model_to_device(self.system_dict);
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/gluon/models/common.py", line 82, in model_to_device
system_dict["local"]["model"].collect_params().reset_ctx(system_dict["local"]["ctx"])
File "/usr/local/lib/python3.7/site-packages/mxnet/gluon/parameter.py", line 879, in reset_ctx
i.reset_ctx(ctx)
File "/usr/local/lib/python3.7/site-packages/mxnet/gluon/parameter.py", line 458, in reset_ctx
self._init_impl(data, ctx)
File "/usr/local/lib/python3.7/site-packages/mxnet/gluon/parameter.py", line 346, in _init_impl
self._data = [data.copyto(ctx) for ctx in self._ctx_list]
File "/usr/local/lib/python3.7/site-packages/mxnet/gluon/parameter.py", line 346, in <listcomp>
self._data = [data.copyto(ctx) for ctx in self._ctx_list]
File "/usr/local/lib/python3.7/site-packages/mxnet/ndarray/ndarray.py", line 2093, in copyto
return _internal._copyto(self, out=hret)
File "<string>", line 25, in _copyto
File "/usr/local/lib/python3.7/site-packages/mxnet/_ctypes/ndarray.py", line 92, in _imperative_invoke
ctypes.byref(out_stypes)))
File "/usr/local/lib/python3.7/site-packages/mxnet/base.py", line 253, in check_call
raise MXNetError(py_str(_LIB.MXGetLastError()))
mxnet.base.MXNetError: [21:23:39] src/ndarray/ndarray.cc:1285: GPU is not enabled
Stack trace:
[bt] (0) 1 libmxnet.so 0x000000010b68d929 mxnet::op::NDArrayOpProp::~NDArrayOpProp() + 4473
[bt] (1) 2 libmxnet.so 0x000000010ccf4e7f mxnet::CopyFromTo(mxnet::NDArray const&, mxnet::NDArray const&, int, bool) + 4671
[bt] (2) 3 libmxnet.so 0x000000010cc17ca9 mxnet::imperative::PushFComputeEx(std::__1::function<void (nnvm::NodeAttrs const&, mxnet::OpContext const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray > const&, std::__1::vector<mxnet::OpReqType, std::__1::allocatormxnet::OpReqType > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray > const&)> const&, nnvm::Op const, nnvm::NodeAttrs const&, mxnet::Context const&, std::__1::vector<mxnet::engine::Var, std::__1::allocatormxnet::engine::Var* > const&, std::__1::vector<mxnet::engine::Var, std::__1::allocatormxnet::engine::Var* > const&, std::__1::vector<mxnet::Resource, std::__1::allocatormxnet::Resource > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::OpReqType, std::__1::allocatormxnet::OpReqType > const&)::'lambda'(mxnet::RunContext)::operator()(mxnet::RunContext) const + 217
[bt] (3) 4 libmxnet.so 0x000000010cc0665e mxnet::imperative::PushFComputeEx(std::__1::function<void (nnvm::NodeAttrs const&, mxnet::OpContext const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray > const&, std::__1::vector<mxnet::OpReqType, std::__1::allocatormxnet::OpReqType > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray > const&)> const&, nnvm::Op const, nnvm::NodeAttrs const&, mxnet::Context const&, std::__1::vector<mxnet::engine::Var, std::__1::allocatormxnet::engine::Var* > const&, std::__1::vector<mxnet::engine::Var, std::__1::allocatormxnet::engine::Var* > const&, std::__1::vector<mxnet::Resource, std::__1::allocatormxnet::Resource > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::OpReqType, std::__1::allocatormxnet::OpReqType > const&) + 1230
[bt] (4) 5 libmxnet.so 0x000000010cc04f6a mxnet::Imperative::InvokeOp(mxnet::Context const&, nnvm::NodeAttrs const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::OpReqType, std::__1::allocatormxnet::OpReqType > const&, mxnet::DispatchMode, mxnet::OpStatePtr) + 810
[bt] (5) 6 libmxnet.so 0x000000010cc098a1 mxnet::Imperative::Invoke(mxnet::Context const&, nnvm::NodeAttrs const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* > const&) + 817
[bt] (6) 7 libmxnet.so 0x000000010cb4f48e SetNDInputsOutputs(nnvm::Op const, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* >, std::__1::vector<mxnet::NDArray, std::__1::allocatormxnet::NDArray* >, int, void const, int, int, int, void) + 1582
[bt] (7) 8 libmxnet.so 0x000000010cb501d0 MXImperativeInvokeEx + 176
[bt] (8) 9 _ctypes.cpython-37m-darwin.so 0x0000000107b8d3a7 ffi_call_unix64 + 79
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "../monk_v1/monk/system/imports.py", line 525, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/pytorch_prototype.py", line 24, in Prototype
self.set_system_experiment(experiment_name, eval_infer=eval_infer, resume_train=resume_train, copy_from=copy_from, summary=summary);
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/system/base_class.py", line 82, in set_system_experiment
self.set_system_state_eval_infer();
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/pytorch/finetune/level_5_state_base.py", line 27, in set_system_state_eval_infer
self.set_model_final();
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/pytorch/finetune/level_2_model_base.py", line 44, in set_model_final
self.system_dict["local"]["model"] = load_model(self.system_dict, final=True);
File "../monk_v1/monk/system/imports.py", line 173, in decorator_wrapper
return validate_function(*function_args, **function_args_dicts)
File "/usr/local/lib/python3.7/site-packages/pylg/pylg.py", line 287, in call
rv = self.function.function(*args, **kwargs)
File "../monk_v1/monk/pytorch/models/return_model.py", line 15, in load_model
finetune_net = torch.load(system_dict["model_dir"] + "final");
File "/usr/local/lib/python3.7/site-packages/torch/serialization.py", line 386, in load
return _load(f, map_location, pickle_module, **pickle_load_args)
File "/usr/local/lib/python3.7/site-packages/torch/serialization.py", line 573, in _load
result = unpickler.load()
File "/usr/local/lib/python3.7/site-packages/torch/serialization.py", line 536, in persistent_load
deserialized_objects[root_key] = restore_location(obj, location)
File "/usr/local/lib/python3.7/site-packages/torch/serialization.py", line 119, in default_restore_location
result = fn(storage, location)
File "/usr/local/lib/python3.7/site-packages/torch/serialization.py", line 95, in _cuda_deserialize
device = validate_cuda_device(location)
File "/usr/local/lib/python3.7/site-packages/torch/serialization.py", line 79, in validate_cuda_device
raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.`
The inference works fine with the Keras backend.