Skip to content

Commit e1d6386

Browse files
Joldnineprasanthpul
authored and committed
correct the command to execute the onnxruntime server; correct the inference_url which caused the issue 'no model loaded'; add comments to clarify the input & output names (#160)
1 parent a9d58c1 commit e1d6386

1 file changed

Lines changed: 7 additions & 4 deletions

File tree

tutorials/OnnxRuntimeServerSSDModel.ipynb

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
"\n",
3939
"3) In the same folder as the downloaded `ssd.onnx` file, please run (to be run on Linux, for Windows you must use WSL):\n",
4040
"\n",
41-
"`sudo docker run -it -v $(pwd):$(pwd) -e MODEL_ABSOLUTE_PATH=$(pwd)/ssd.onnx -p 9001:8001 mcr.microsoft.com/onnxruntime/server` \n",
41+
"`sudo docker run -it -v $(pwd):$(pwd) -p 9001:8001 mcr.microsoft.com/onnxruntime/server --model_path $(pwd)/ssd.onnx` \n",
4242
"\n",
4343
"(In case of errors like port already allocated etc., please only change the number 9001 to something else (keeping 8001 as is). Please remember the changed port number as it will be needed to modify the URL where the HTTP request is actually sent. Instructions will be present in python comments in the appropriate Jupyter cell.) \n",
4444
"\n",
@@ -156,6 +156,8 @@
156156
"input_tensor.raw_data = norm_img_data.tobytes()\n",
157157
"\n",
158158
"request_message = predict_pb2.PredictRequest()\n",
159+
"\n",
160+
"# For your model, the inputs name should be something else customized by yourself. Use Netron to find out the input name.\n",
159161
"request_message.inputs[\"image\"].data_type = input_tensor.data_type\n",
160162
"request_message.inputs[\"image\"].dims.extend(input_tensor.dims)\n",
161163
"request_message.inputs[\"image\"].raw_data = input_tensor.raw_data\n",
@@ -187,7 +189,7 @@
187189
"# Change the number 9001 to the appropriate port number if you had changed it during ORT Server docker instantiation\n",
188190
"\n",
189191
"PORT_NUMBER = 9001 # Change appropriately if needed based on any changes when invoking the server in the pre-requisites\n",
190-
"inference_url = \"http://127.0.0.1:\" + str(PORT_NUMBER) + \"/v1/models/ssd/versions/1:predict\"\n",
192+
"inference_url = \"http://127.0.0.1:\" + str(PORT_NUMBER) + \"/v1/models/default/versions/1:predict\"\n",
191193
"response = requests.post(inference_url, headers=request_headers, data=request_message.SerializeToString())"
192194
]
193195
},
@@ -220,6 +222,7 @@
220222
"response_message = predict_pb2.PredictResponse()\n",
221223
"response_message.ParseFromString(response.content)\n",
222224
"\n",
225+
"# For your model, the outputs names should be something else customized by yourself. Use Netron to find out the outputs names.\n",
223226
"bboxes = np.frombuffer(response_message.outputs['bboxes'].raw_data, dtype=np.float32)\n",
224227
"labels = np.frombuffer(response_message.outputs['labels'].raw_data, dtype=np.int64)\n",
225228
"scores = np.frombuffer(response_message.outputs['scores'].raw_data, dtype=np.float32)\n",
@@ -313,9 +316,9 @@
313316
"name": "python",
314317
"nbconvert_exporter": "python",
315318
"pygments_lexer": "ipython3",
316-
"version": "3.6.8"
319+
"version": "3.7.4"
317320
}
318321
},
319322
"nbformat": 4,
320-
"nbformat_minor": 2
323+
"nbformat_minor": 4
321324
}

0 commit comments

Comments
 (0)