# Simple CNN on MNIST: data loading, training, evaluation, and checkpointing.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# Fix the RNG seed so weight init and shuffling are reproducible across runs.
torch.manual_seed(42)

# Convert images to [0, 1] tensors, then normalize to roughly [-1, 1].
# MNIST is single-channel, hence the one-element mean/std tuples.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# download=True makes the script self-contained: it fetches MNIST on the
# first run and is a no-op once ./data already holds the files.
# (The original download=False crashed on a fresh checkout.)
train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST('./data', train=False, download=True, transform=transform)

# BUG FIX: the training loader used shuffle=False, feeding samples in the
# same fixed order every epoch, which degrades SGD convergence. Training
# data should be reshuffled each epoch; evaluation order is irrelevant,
# so the test loader stays unshuffled.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=False)
class SimpleCNN(nn.Module):
    """Two-conv-layer CNN for 28x28 single-channel MNIST digits.

    Architecture: [conv3x3 -> ReLU -> maxpool2x2] x 2, then two fully
    connected layers producing 10 class logits. No softmax is applied
    here; the loss function (e.g. CrossEntropyLoss) expects raw logits.
    """

    def __init__(self):
        super().__init__()
        # With kernel_size=3 and padding=1 the conv layers preserve the
        # spatial size, so only the pooling layers shrink the feature maps.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        # 2x2 max pooling with stride 2 halves height and width; the same
        # module is reused after each conv layer (28 -> 14 -> 7).
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # After two poolings a 28x28 input is 64 channels of 7x7.
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten per sample. Using x.size(0) instead of -1 for the batch
        # dimension makes a wrong input size raise immediately rather than
        # silently producing a mis-shaped batch.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)
# Instantiate the network together with the loss and optimizer used to train it.
model = SimpleCNN()

# Cross-entropy over the 10 digit classes; operates on raw logits, so the
# model deliberately omits a final softmax.
criterion = nn.CrossEntropyLoss()

# Adam with the conventional default learning rate of 1e-3.
optimizer = optim.Adam(model.parameters(), lr=0.001)
# ---- Training ----
num_epochs = 10
for epoch in range(num_epochs):
    model.train()  # training-mode behavior (affects dropout/batchnorm, if present)
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):
        optimizer.zero_grad()             # clear gradients left from the previous step
        outputs = model(inputs)           # forward pass: (batch, 10) logits
        loss = criterion(outputs, labels)
        loss.backward()                   # backpropagate
        optimizer.step()                  # apply the parameter update

        running_loss += loss.item()
        # Every 100 mini-batches, report the mean loss over that window
        # and reset the accumulator.
        if i % 100 == 99:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(train_loader)}], Loss: {running_loss / 100:.4f}')
            running_loss = 0.0

# ---- Evaluation on the held-out test set ----
# NOTE(review): structure reconstructed from a whitespace-mangled source;
# evaluation is placed after the epoch loop, consistent with the single
# "10000 test images" summary print below — confirm against the original.
model.eval()  # inference-mode behavior
correct = 0
total = 0
with torch.no_grad():  # no gradients needed for evaluation
    for inputs, labels in test_loader:
        outputs = model(inputs)
        # Predicted class = index of the max logit along dim 1.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Accuracy of the network on the 10000 test images: {100 * correct / total:.2f}%')

# Persist only the learned weights (state dict), not the full module object.
torch.save(model.state_dict(), "CNN_model.pth")