@melgor
Created December 20, 2017 08:03
Testing GPU for Deep Learning
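A minimal PyTorch benchmark: it times the forward and backward passes of a ResNet-50 on randomly generated batches and reports the average wall-clock time per data point, which makes it a quick way to compare GPUs or single- vs. multi-GPU setups.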
import torchvision.models as models
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.autograd import Variable
import time
cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms for the fixed input size
GPU_ID = [0]  # enables multi-GPU setups: use [0] for a single GPU, [0, 1] for two GPUs
batch_size = 96  # affects computation time and RAM-to-GPU data transfer; the batch must fit in GPU memory. When using multi-GPU, this value can be doubled
num_data = 5000  # limits the length of the test; keep this value fixed across compared runs
num_iter = num_data // batch_size  # integer division so range() receives an int in Python 3
input = Variable(torch.randn(batch_size, 3, 224, 224).cuda())  # synthetic ImageNet-sized batch, kept on the GPU
target = Variable(torch.LongTensor(batch_size).fill_(1).cuda())  # dummy labels, all class 1
model = models.resnet50()
model = torch.nn.DataParallel(model, GPU_ID).cuda()  # replicate the model on every GPU in GPU_ID and split each batch across them
loss = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001,
                            momentum=0.9,
                            weight_decay=1e-5)
print ("Start evaulation")
start = time.time()
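# Note: torch.cuda.synchronize() makes the CPU wait for all queued GPU kernels to finish,
# so the wall-clock timing below measures actual GPU work rather than just asynchronous kernel launches.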
for i in range(num_iter):
    torch.cuda.synchronize()
    optimizer.zero_grad()  # clear gradients accumulated in the previous iteration
    out = model(input)
    err = loss(out, target)
    err.backward()
    optimizer.step()
torch.cuda.synchronize()  # wait for the last batch to finish before stopping the timer
print ("Averege Time for one data point: ", (time.time() - start)/num_data, ' batch size: ', batch_size, ' data points: ', num_data)