# PyTorch CUDA training throughput benchmark: trains a small CNN on a fixed
# synthetic batch for a fixed wall-clock duration and reports step count.
import time

import torch
import torch.nn as nn
import torch.optim as optim
# Fail fast if the GPU stack is unusable. Raise (rather than `assert`) so the
# check survives `python -O`, which strips assert statements entirely.
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available")
if not torch.backends.cudnn.enabled:
    raise RuntimeError("cuDNN is not enabled")

# All model parameters and tensors below live on this device.
device = torch.device("cuda")
class ConvNet(nn.Module):
    """Small CNN for 32x32 RGB inputs (CIFAR-10 sized) with 10 output classes.

    Two conv + 2x2 max-pool stages reduce the spatial size 32 -> 16 -> 8
    before a two-layer fully connected head.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # After two 2x2 pools: 32 -> 16 -> 8, so the flattened size is 64*8*8.
        self.fc1 = nn.Linear(64 * 8 * 8, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10) for input (batch, 3, 32, 32)."""
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        # flatten(x, 1) preserves the batch dimension; the original
        # view(-1, 64*8*8) would silently mangle the batch size if the
        # spatial dimensions ever differed from 32x32.
        x = torch.flatten(x, 1)
        x = torch.relu(self.fc1(x))
        return self.fc2(x)
# Model, loss, and optimizer for the throughput benchmark.
model = ConvNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Fixed synthetic batch (CIFAR-10-shaped). Allocating directly on the GPU via
# the `device=` kwarg avoids a host allocation plus a host-to-device copy
# that `torch.randn(...).to(device)` would incur.
inputs = torch.randn(64, 3, 32, 32, device=device)
labels = torch.randint(0, 10, (64,), device=device)
# Repeatedly train on the single fixed batch for `target_duration` seconds of
# wall-clock time, counting optimizer steps. Each iteration reuses the same
# batch, so these are steps, not true epochs — the original mislabeled them.
model.train()
start_time = time.time()
target_duration = 300  # seconds of wall-clock training
steps = 0
report_interval = 1000  # print the loss every N steps

while time.time() - start_time < target_duration:
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    steps += 1
    if steps % report_interval == 0:
        # .item() forces a GPU sync; only paid once per report interval.
        print(f"Step: {steps}, Loss: {loss.item()}")

# CUDA kernels launch asynchronously: without a synchronize, the elapsed time
# would measure work merely queued, not finished, skewing the benchmark.
torch.cuda.synchronize()
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Training finished. Total time: {elapsed_time:.2f} seconds, Steps: {steps}")