From 4c4dd090b25f1804cc04ed34357f91286b4138f0 Mon Sep 17 00:00:00 2001
From: Alex Grigorevskiy
Date: Mon, 29 Jan 2018 12:00:26 +0200
Subject: [PATCH 1/2] CPU run: Added possibility to run entirely on CPU.

---
 infer.py |  8 ++++++--
 run.py   | 12 ++++++++----
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/infer.py b/infer.py
index 1c374d1..2f372d6 100644
--- a/infer.py
+++ b/infer.py
@@ -31,6 +31,9 @@
 args = parser.parse_args()
 print(args)
 
+use_gpu = torch.cuda.is_available() # global flag
+print('GPU is available.') if use_gpu else print('GPU is not available.')
+
 def main():
   params = dict()
   params['batch_size'] = 1
@@ -71,14 +74,15 @@
   print('######################################################')
   print('######################################################')
   rencoder.eval()
-  rencoder = rencoder.cuda()
+  if use_gpu: rencoder = rencoder.cuda()
+
   inv_userIdMap = {v: k for k, v in data_layer.userIdMap.items()}
   inv_itemIdMap = {v: k for k, v in data_layer.itemIdMap.items()}
 
   eval_data_layer.src_data = data_layer.data
   with open(args.predictions_path, 'w') as outf:
     for i, ((out, src), majorInd) in enumerate(eval_data_layer.iterate_one_epoch_eval(for_inf=True)):
-      inputs = Variable(src.cuda().to_dense())
+      inputs = Variable(src.cuda().to_dense() if use_gpu else src.to_dense())
       targets_np = out.to_dense().numpy()[0, :]
       outputs = rencoder(inputs).cpu().data.numpy()[0, :]
       non_zeros = targets_np.nonzero()[0].tolist()
diff --git a/run.py b/run.py
index 77ec8ce..60e31c2 100644
--- a/run.py
+++ b/run.py
@@ -53,13 +53,16 @@
 args = parser.parse_args()
 print(args)
 
+use_gpu = torch.cuda.is_available() # global flag
+print('GPU is available.') if use_gpu else print('GPU is not available.')
+
 def do_eval(encoder, evaluation_data_layer):
   encoder.eval()
   denom = 0.0
   total_epoch_loss = 0.0
   for i, (eval, src) in enumerate(evaluation_data_layer.iterate_one_epoch_eval()):
-    inputs = Variable(src.cuda().to_dense())
-    targets = Variable(eval.cuda().to_dense())
+    inputs = Variable(src.cuda().to_dense() if use_gpu else src.to_dense())
+    targets = Variable(eval.cuda().to_dense() if use_gpu else eval.to_dense())
     outputs = encoder(inputs)
     loss, num_ratings = model.MSEloss(outputs, targets)
     total_epoch_loss += loss.data[0]
@@ -139,7 +142,8 @@
   if len(gpu_ids)>1:
     rencoder = nn.DataParallel(rencoder,
                                device_ids=gpu_ids)
-  rencoder = rencoder.cuda()
+
+  if use_gpu: rencoder = rencoder.cuda()
 
   if args.optimizer == "adam":
     optimizer = optim.Adam(rencoder.parameters(),
@@ -177,7 +181,7 @@
     if args.optimizer == "momentum":
       scheduler.step()
     for i, mb in enumerate(data_layer.iterate_one_epoch()):
-      inputs = Variable(mb.cuda().to_dense())
+      inputs = Variable(mb.cuda().to_dense() if use_gpu else mb.to_dense())
       optimizer.zero_grad()
       outputs = rencoder(inputs)
       loss, num_ratings = model.MSEloss(outputs, inputs)

From 1d1cf1527fc16e92081ccf4d8c1a4893c848b5c2 Mon Sep 17 00:00:00 2001
From: Alex Grigorevskiy
Date: Mon, 29 Jan 2018 13:20:18 +0200
Subject: [PATCH 2/2] CPU run: modified output.

---
 infer.py | 5 ++++-
 run.py   | 7 +++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/infer.py b/infer.py
index 2f372d6..6018378 100644
--- a/infer.py
+++ b/infer.py
@@ -32,7 +32,10 @@
 print(args)
 
 use_gpu = torch.cuda.is_available() # global flag
-print('GPU is available.') if use_gpu else print('GPU is not available.')
+if use_gpu:
+    print('GPU is available.')
+else:
+    print('GPU is not available.')
 
 def main():
   params = dict()
diff --git a/run.py b/run.py
index 60e31c2..8ca723a 100644
--- a/run.py
+++ b/run.py
@@ -54,7 +54,10 @@
 print(args)
 
 use_gpu = torch.cuda.is_available() # global flag
-print('GPU is available.') if use_gpu else print('GPU is not available.')
+if use_gpu:
+    print('GPU is available.')
+else:
+    print('GPU is not available.')
 
 def do_eval(encoder, evaluation_data_layer):
   encoder.eval()
@@ -236,4 +239,4 @@
     torch.save(rencoder.state_dict(), model_checkpoint + ".last")
 
 if __name__ == '__main__':
-  main()
\ No newline at end of file
+  main()