# docker run -it -p 8080:8080 iceychris/libreasr:latest
make sde &
make sen &
make b
make[1]: Entering directory '/workspace'
python3 -u api-server.py de
make[1]: Entering directory '/workspace'
python3 -u api-server.py en
make[1]: Entering directory '/workspace'
python3 -u api-bridge.py
[api-bridge] running on :8080
LM: Failed to load.
LM: Failed to load.
Traceback (most recent call last):
File "api-server.py", line 155, in <module>
Traceback (most recent call last):
File "api-server.py", line 155, in <module>
serve(args.lang)
serve(args.lang)
File "api-server.py", line 140, in serve
File "api-server.py", line 140, in serve
apg.add_ASRServicer_to_server(ASRServicer(lang), server)
apg.add_ASRServicer_to_server(ASRServicer(lang), server)
File "api-server.py", line 56, in __init__
File "api-server.py", line 56, in __init__
conf, lang, m, x_tfm, x_tfm_stream = load_stuff(lang)
conf, lang, m, x_tfm, x_tfm_stream = load_stuff(lang)
File "/workspace/lib/inference.py", line 20, in load_stuff
File "/workspace/lib/inference.py", line 20, in load_stuff
conf, lang, m, tfms = parse_and_apply_config(inference=True, lang=lang)
conf, lang, m, tfms = parse_and_apply_config(inference=True, lang=lang)
File "/workspace/lib/config.py", line 151, in parse_and_apply_config
File "/workspace/lib/config.py", line 151, in parse_and_apply_config
load_asr_model(m, lang_name, lang, conf["cuda"]["device"], lm=lm)
load_asr_model(m, lang_name, lang, conf["cuda"]["device"], lm=lm)
File "/workspace/lib/model_utils.py", line 88, in load_asr_model
File "/workspace/lib/model_utils.py", line 88, in load_asr_model
model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
model, {torch.nn.LSTM, torch.nn.Linear}, dtype=torch.qint8
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 285, in quantize_dynamic
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 285, in quantize_dynamic
convert(model, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 365, in convert
convert(model, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 365, in convert
convert(mod, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 365, in convert
convert(mod, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 365, in convert
convert(mod, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 365, in convert
convert(mod, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 365, in convert
convert(mod, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 366, in convert
convert(mod, mapping, inplace=True)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 366, in convert
reassign[name] = swap_module(mod, mapping)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 395, in swap_module
reassign[name] = swap_module(mod, mapping)
File "/usr/local/lib/python3.7/dist-packages/torch/quantization/quantize.py", line 395, in swap_module
new_mod = mapping[type(mod)].from_float(mod)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 421, in from_float
new_mod = mapping[type(mod)].from_float(mod)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 421, in from_float
return super(LSTM, cls).from_float(mod)
return super(LSTM, cls).from_float(mod)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 229, in from_float
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 229, in from_float
mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 335, in __init__
mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 335, in __init__
super(LSTM, self).__init__('LSTM', *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 85, in __init__
super(LSTM, self).__init__('LSTM', *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/quantized/dynamic/modules/rnn.py", line 85, in __init__
torch.ops.quantized.linear_prepack(w_ih, b_ih)
RuntimeError: Didn't find engine for operation quantized::linear_prepack NoQEngine
torch.ops.quantized.linear_prepack(w_ih, b_ih)
RuntimeError: Didn't find engine for operation quantized::linear_prepack NoQEngine
make[1]: *** [Makefile:55: sen] Error 1
make[1]: Leaving directory '/workspace'
make[1]: *** [Makefile:57: sde] Error 1
make[1]: Leaving directory '/workspace'