From d33f45b33c37760dd9c96a117088bc3d0c91b9c9 Mon Sep 17 00:00:00 2001
From: Paul Hendricks
Date: Wed, 3 Oct 2018 16:33:32 -0400
Subject: [PATCH 1/3] added ability to save model to onnx format during training

---
 run.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/run.py b/run.py
index 3936781..8fcda69 100644
--- a/run.py
+++ b/run.py
@@ -241,5 +241,11 @@ def main():
   print("Saving model to {}".format(model_checkpoint + ".last"))
   torch.save(rencoder.state_dict(), model_checkpoint + ".last")
 
+  # save to onnx
+  dummy_input = Variable(torch.randn(params['batch_size'], data_layer.vector_dim).type(torch.float))
+  torch.onnx.export(rencoder.float(), dummy_input.cuda() if use_gpu else dummy_input,
+                    model_checkpoint + ".onnx", verbose=True)
+  print("ONNX model saved to {}!".format(model_checkpoint + ".onnx"))
+
 if __name__ == '__main__':
   main()

From fc47cea51c822d6e7a8f2e248d6662ef7331e874 Mon Sep 17 00:00:00 2001
From: Paul Hendricks
Date: Wed, 3 Oct 2018 16:34:45 -0400
Subject: [PATCH 2/3] added ability to save model to onnx format during training

---
 run.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/run.py b/run.py
index 8fcda69..9c6fcc2 100644
--- a/run.py
+++ b/run.py
@@ -241,7 +241,7 @@ def main():
   print("Saving model to {}".format(model_checkpoint + ".last"))
   torch.save(rencoder.state_dict(), model_checkpoint + ".last")
 
-  # save to onnx
+  # save to onnx 
   dummy_input = Variable(torch.randn(params['batch_size'], data_layer.vector_dim).type(torch.float))
   torch.onnx.export(rencoder.float(), dummy_input.cuda() if use_gpu else dummy_input,
                     model_checkpoint + ".onnx", verbose=True)

From 579308f5b63bcba8d3d71e0263fb4c20d75a3d42 Mon Sep 17 00:00:00 2001
From: Paul Hendricks
Date: Fri, 5 Oct 2018 08:51:24 -0500
Subject: [PATCH 3/3] removed space

---
 run.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/run.py b/run.py
index 9c6fcc2..471b673 100644
--- a/run.py
+++ b/run.py
@@ -241,7 +241,7 @@ def main():
   print("Saving model to {}".format(model_checkpoint + ".last"))
   torch.save(rencoder.state_dict(), model_checkpoint + ".last")
 
-  # save to onnx 
+  # save to onnx
   dummy_input = Variable(torch.randn(params['batch_size'], data_layer.vector_dim).type(torch.float))
   torch.onnx.export(rencoder.float(), dummy_input.cuda() if use_gpu else dummy_input,
                     model_checkpoint + ".onnx", verbose=True)
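
The patches above export the trained autoencoder to ONNX at the end of training. As a sanity check on the resulting file, a minimal sketch along the following lines can load and run the exported graph. This is not part of the patch series: the onnx and onnxruntime packages are not referenced in the PR, the path "model.onnx" stands in for whatever model_checkpoint + ".onnx" resolves to, and the 128 x 10000 input shape is a placeholder for the (params['batch_size'], data_layer.vector_dim) shape used as the dummy input during export.

# Sketch only: verify the file written by torch.onnx.export in run.py.
# Assumes `pip install onnx onnxruntime`; path and input shape are placeholders.
import numpy as np
import onnx
import onnxruntime

onnx_path = "model.onnx"  # stand-in for model_checkpoint + ".onnx"

# Structural check: parse the protobuf and validate the graph and opset.
onnx.checker.check_model(onnx.load(onnx_path))

# Run one forward pass on CPU. Because the model was exported with a fixed-shape
# dummy input, the batch size and feature dimension here must match it.
session = onnxruntime.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
batch = np.random.randn(128, 10000).astype(np.float32)  # placeholder shape
(reconstruction,) = session.run(None, {input_name: batch})
print(reconstruction.shape)  # expected: same shape as the input batch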