The GPU trains this network in about 16 seconds; the CPU in about 13 seconds. (I comment/uncomment the appropriate device lines to switch between the two runs.) Can anyone see what is wrong with my code or with my PyTorch installation? (I have already checked that the GPU is available and that there is enough free memory on it.)
from os import path
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'
print(accelerator)
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.0-{platform}-linux_x86_64.whl torchvision
print("done")
#########################
import torch
from datetime import datetime
startTime = datetime.now()
dtype = torch.float
device = torch.device("cpu") # Comment this to run on GPU
# device = torch.device("cuda:0") # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1024, 128, 8
# Create random Tensors to hold input and outputs.
x = torch.randn(N, D_in, device=device, dtype=dtype)
t = torch.randn(N, D_out, device=device, dtype=dtype)
# Create random Tensors for weights.
w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)
w3 = torch.randn(D_out, D_out, device=device, dtype=dtype, requires_grad=True)
learning_rate = 1e-9
for i in range(10000):
    y_pred = x.mm(w1).clamp(min=0).mm(w2).clamp(min=0).mm(w3)
    loss = (y_pred - t).pow(2).sum()
    if i % 1000 == 0:
        print(i, loss.item())
    loss.backward()
    # Manually update weights using gradient descent
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        # Manually zero the gradients after updating weights
        w1.grad.zero_()
        w2.grad.zero_()
print(datetime.now() - startTime)
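For comparison, here is a sketch of how the two runs could be timed without commenting/uncommenting the device lines. It is only an illustration under two assumptions: the helper name time_bench is mine, and the model is reduced to the two weights that are actually updated, which is enough for a device-to-device comparison. The torch.cuda.synchronize() call is there because CUDA kernels are launched asynchronously, so the clock should only be stopped once the queued GPU work has finished. This only changes how the timing is taken, not what is being computed.

import torch
from datetime import datetime

def time_bench(device, iters=10000, lr=1e-9):
    # Same sizes as in the question: N=64, D_in=1024, H=128, D_out=8
    x  = torch.randn(64, 1024, device=device)
    t  = torch.randn(64, 8, device=device)
    w1 = torch.randn(1024, 128, device=device, requires_grad=True)
    w2 = torch.randn(128, 8, device=device, requires_grad=True)
    start = datetime.now()
    for _ in range(iters):
        loss = (x.mm(w1).clamp(min=0).mm(w2) - t).pow(2).sum()
        loss.backward()
        with torch.no_grad():
            w1 -= lr * w1.grad
            w2 -= lr * w2.grad
            w1.grad.zero_()
            w2.grad.zero_()
    if device.type == "cuda":
        torch.cuda.synchronize()   # wait for queued GPU kernels before reading the clock
    return datetime.now() - start

print("cpu :", time_bench(torch.device("cpu")))
if torch.cuda.is_available():
    print("cuda:", time_bench(torch.device("cuda:0")))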