Week 5 (P5): Sneaker Identification

>- **This article is a learning-record blog for the [365-day deep learning training camp](https://mp.weixin.qq.com/s/Nb93582M_5usednAKp_Jtw)**
>- **Reference article: [PyTorch in Practice | Week P5: Sneaker Identification](https://www.heywhale.com/mw/project/6352467ca42e79f98f6bbf13)**
>- **Original author: [Student K | tutoring, custom projects](https://mtyjkh.blog.csdn.net/)**
>- **Article source: [Student K's study circle](https://www.yuque.com/mingtian-fkmxf/zxwb45)**

1. Preparation

1. Set up GPU

import torch
import torchvision
import torchvision.transforms as transforms
from torchvision import datasets
import torch.nn as nn

import os, PIL, pathlib

# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

device

device(type='cuda')

2. Import data

# View the categories
data_path = r"F:\P5_data\test"
data_path = pathlib.Path(data_path)
class_path = list(data_path.glob("*"))
class_names = [str(path).split("\\")[-1] for path in class_path]  # last path component = class folder name
class_names
['adidas', 'nike']
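
A more portable way to build the same list (a minimal sketch, useful if the backslash split ever runs against non-Windows paths) is pathlib's .name attribute:

# Each child of the data directory is a folder named after its class,
# so .name yields the label regardless of the OS path separator.
class_names = [p.name for p in data_path.glob("*") if p.is_dir()]
print(class_names)  # expected: ['adidas', 'nike']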
# View some pictures
import matplotlib.pyplot as plt
from PIL import Image

fig, axes = plt.subplots(3, 8, figsize=(16, 6))
image_folder = r"F:\P5_data\train\adidas"
image_files = [f for f in os.listdir(image_folder) if f.endswith((".jpg", ".png", ".jpeg"))]

for ax, file in zip(axes.flat, image_files):
    img_path = os.path.join(image_folder, file)
    img = Image.open(img_path)
    ax.imshow(img)
    ax.axis('off')
plt.show()

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),   # resize inputs to a uniform 224x224
    transforms.ToTensor(),
    transforms.Normalize(            # standard ImageNet channel statistics
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])

test_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])
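
Because Normalize shifts pixel values away from [0, 1], images read back from the ImageFolder datasets built below (rather than from the raw files, as above) must be denormalized before plotting. A minimal sketch, assuming the same mean/std as the transforms:

import numpy as np

def denormalize(img_tensor):
    # Undo transforms.Normalize so the image is viewable again
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = img_tensor.numpy().transpose(1, 2, 0)  # CHW -> HWC
    return np.clip(img * std + mean, 0, 1)

# Example usage: plt.imshow(denormalize(train_dataset[0][0]))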

train_dataset = datasets.ImageFolder(r"F:\P5_data\train", transform=train_transforms)
test_dataset = datasets.ImageFolder(r"F:\P5_data\test", transform=test_transforms)
train_dataset.class_to_idx
test_dataset.class_to_idx

{'adidas': 0, 'nike': 1}
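
A quick consistency check (a small sketch, not in the original notebook): ImageFolder infers classes from sub-folder names, so both splits should produce the identical mapping.

# Verify the two splits agree on the label mapping, and report their sizes
assert train_dataset.class_to_idx == test_dataset.class_to_idx
print(len(train_dataset), "training images,", len(test_dataset), "test images")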

batch_size = 32
train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=1)

for X,y in test_dl:
    print("shape of X [N,C,H,W]:", X.shape)
    print("shape of y:",y.shape,y.dtype)
    break
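
With the 224×224 resize and batch_size = 32, the first (shuffled) batch should print along these lines; only a final partial batch would show a smaller N:

shape of X [N,C,H,W]: torch.Size([32, 3, 224, 224])
shape of y: torch.Size([32]) torch.int64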

2. Build a simple CNN model

import torch.nn.functional as F

class Model(nn.Module):
    
    def __init__(self):
        super(Model, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=0),#12*220*220
            nn.BatchNorm2d(12),
            nn.ReLU())
        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5, padding=0),#12*216*216
            nn.BatchNorm2d(12),
            nn.ReLU())
        self.pool1 = nn.Sequential(
            nn.MaxPool2d(2)) #12*108*108
        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=0),#24*104*104
            nn.BatchNorm2d(24),
            nn.ReLU())
        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5, padding=0),#24*100*100
            nn.BatchNorm2d(24),
            nn.ReLU())
        self.pool2 = nn.Sequential(
            nn.MaxPool2d(2)) #24*50*50
        self.conv7 = nn.Sequential(
            nn.Conv2d(24, 48, kernel_size=5, padding=0),#48*46*46
            nn.BatchNorm2d(48),
            nn.ReLU())
        self.conv8 = nn.Sequential(
            nn.Conv2d(48, 48, kernel_size=5, padding=0),#48*42*42
            nn.BatchNorm2d(48),
            nn.ReLU())
        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2)) #48*21*21
        
        self.dropout = nn.Sequential(
            nn.Dropout(0.2))

        self.fc = nn.Sequential(
            nn.Linear(48*21*21, len(class_names)))  # 48*21*21 = 21168 flattened features
        
    def forward(self, x):
        
        batch_size = x.size(0)
        x = self.conv1(x) #Convolution-BN-activation
        x = self.conv2(x) #Convolution-BN-activation
        x = self.pool1(x) #Pooling layer
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool2(x)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.pool3(x)
        x = self.dropout(x)
        x = x.view(batch_size,-1)
        x = self.fc(x)
        
        return x
    
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using {} device".format(device))

model = Model().to(device)
model
Using cuda device

Model(
  (conv1): Sequential(
    (0): Conv2d(3, 12, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (conv2): Sequential(
    (0): Conv2d(12, 12, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (pool1): Sequential(
    (0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (conv4): Sequential(
    (0): Conv2d(12, 24, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (conv5): Sequential(
    (0): Conv2d(24, 24, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (pool2): Sequential(
    (0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (conv7): Sequential(
    (0): Conv2d(24, 48, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (conv8): Sequential(
    (0): Conv2d(48, 48, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (pool3): Sequential(
    (0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (dropout): Sequential(
    (0): Dropout(p=0.2, inplace=False)
  )
  (fc): Sequential(
    (0): Linear(in_features=21168, out_features=2, bias=True)
  )
)
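
As a sanity check on the fc layer's in_features=21168 (48 channels × 21 × 21 after three rounds of two 5×5 valid convolutions plus 2×2 max pooling), a dummy forward pass can confirm the shape. A minimal sketch, not part of the original notebook:

model.eval()  # avoid updating BatchNorm running stats with the fake input
with torch.no_grad():
    x = torch.randn(1, 3, 224, 224, device=device)
    x = model.pool1(model.conv2(model.conv1(x)))
    x = model.pool2(model.conv5(model.conv4(x)))
    x = model.pool3(model.conv8(model.conv7(x)))
    print(x.shape)                # expected: torch.Size([1, 48, 21, 21])
    print(x.flatten(1).shape[1])  # expected: 21168 = 48 * 21 * 21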

3. Train the model

## Write the training function

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)   # size of the training set
    num_batches = len(dataloader)    # number of batches

    train_loss, train_acc = 0, 0     # initialize running loss and accuracy

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Compute the prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()  # zero the gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update the parameters

        # Accumulate accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches

    return train_acc, train_loss
## Write the test function

def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_acc, test_loss = 0, 0

    with torch.no_grad():  # no gradients needed during evaluation
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # Compute loss and accuracy
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches

    return test_acc, test_loss
            
# Set a dynamic learning rate
def adjust_learning_rate(optimizer, epoch, start_lr):
    # Decay the learning rate by a factor of 0.98 every 2 epochs
    lr = start_lr * (0.98 ** (epoch // 2))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

learn_rate = 1e-4  # initial learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
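
The commented-out scheduler.step() in the loop below refers to PyTorch's built-in scheduler interface. A minimal sketch of the equivalent official schedule (multiply the lr by 0.98 every 2 epochs), under the assumption that this is the interface the comment intends:

from torch.optim.lr_scheduler import StepLR

# Built-in equivalent of adjust_learning_rate(); if you use it,
# call scheduler.step() once per epoch instead of the custom function.
scheduler = StepLR(optimizer, step_size=2, gamma=0.98)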
# Formal training
loss_fn = nn.CrossEntropyLoss()  # create the loss function
epochs = 50

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    # Update the learning rate (when using the custom schedule)
    adjust_learning_rate(optimizer, epoch, learn_rate)

    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
    # scheduler.step()  # update the learning rate (when using the official scheduler interface)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # Get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc*100, epoch_train_loss,
                          epoch_test_acc*100, epoch_test_loss, lr))
print('Done')
Epoch: 1, Train_acc:56.4%, Train_loss:0.722, Test_acc:50.0%, Test_loss:0.698, Lr:1.00E-04
Epoch: 2, Train_acc:58.4%, Train_loss:0.678, Test_acc:65.8%, Test_loss:0.657, Lr:1.00E-04
Epoch: 3, Train_acc:61.8%, Train_loss:0.669, Test_acc:73.7%, Test_loss:0.633, Lr:9.80E-05
Epoch: 4, Train_acc:65.3%, Train_loss:0.621, Test_acc:72.4%, Test_loss:0.577, Lr:9.80E-05
Epoch: 5, Train_acc:67.7%, Train_loss:0.607, Test_acc:73.7%, Test_loss:0.566, Lr:9.60E-05
Epoch: 6, Train_acc:67.3%, Train_loss:0.590, Test_acc:76.3%, Test_loss:0.571, Lr:9.60E-05
Epoch: 7, Train_acc:69.1%, Train_loss:0.578, Test_acc:72.4%, Test_loss:0.566, Lr:9.41E-05
Epoch: 8, Train_acc:71.7%, Train_loss:0.561, Test_acc:77.6%, Test_loss:0.552, Lr:9.41E-05
Epoch: 9, Train_acc:69.1%, Train_loss:0.543, Test_acc:82.9%, Test_loss:0.551, Lr:9.22E-05
Epoch:10, Train_acc:71.1%, Train_loss:0.551, Test_acc:81.6%, Test_loss:0.525, Lr:9.22E-05
Epoch:11, Train_acc:74.1%, Train_loss:0.509, Test_acc:81.6%, Test_loss:0.524, Lr:9.04E-05
Epoch:12, Train_acc:73.7%, Train_loss:0.517, Test_acc:81.6%, Test_loss:0.583, Lr:9.04E-05
Epoch:13, Train_acc:73.3%, Train_loss:0.504, Test_acc:78.9%, Test_loss:0.524, Lr:8.86E-05
Epoch:14, Train_acc:78.1%, Train_loss:0.498, Test_acc:84.2%, Test_loss:0.536, Lr:8.86E-05
Epoch:15, Train_acc:76.1%, Train_loss:0.493, Test_acc:84.2%, Test_loss:0.496, Lr:8.68E-05
Epoch:16, Train_acc:80.7%, Train_loss:0.468, Test_acc:84.2%, Test_loss:0.497, Lr:8.68E-05
Epoch:17, Train_acc:77.9%, Train_loss:0.469, Test_acc:85.5%, Test_loss:0.514, Lr:8.51E-05
Epoch:18, Train_acc:80.7%, Train_loss:0.463, Test_acc:85.5%, Test_loss:0.484, Lr:8.51E-05
Epoch:19, Train_acc:80.5%, Train_loss:0.454, Test_acc:85.5%, Test_loss:0.509, Lr:8.34E-05
Epoch:20, Train_acc:80.1%, Train_loss:0.452, Test_acc:76.3%, Test_loss:0.499, Lr:8.34E-05
Epoch:21, Train_acc:82.5%, Train_loss:0.441, Test_acc:84.2%, Test_loss:0.506, Lr:8.17E-05
Epoch:22, Train_acc:84.1%, Train_loss:0.420, Test_acc:81.6%, Test_loss:0.484, Lr:8.17E-05
Epoch:23, Train_acc:84.5%, Train_loss:0.421, Test_acc:85.5%, Test_loss:0.537, Lr:8.01E-05
Epoch:24, Train_acc:85.3%, Train_loss:0.412, Test_acc:88.2%, Test_loss:0.460, Lr:8.01E-05
Epoch:25, Train_acc:85.9%, Train_loss:0.410, Test_acc:86.8%, Test_loss:0.488, Lr:7.85E-05
Epoch:26, Train_acc:86.7%, Train_loss:0.392, Test_acc:80.3%, Test_loss:0.516, Lr:7.85E-05
Epoch:27, Train_acc:86.5%, Train_loss:0.395, Test_acc:82.9%, Test_loss:0.456, Lr:7.69E-05
Epoch:28, Train_acc:87.1%, Train_loss:0.396, Test_acc:88.2%, Test_loss:0.459, Lr:7.69E-05
Epoch:29, Train_acc:88.4%, Train_loss:0.387, Test_acc:86.8%, Test_loss:0.440, Lr:7.54E-05
Epoch:30, Train_acc:88.8%, Train_loss:0.370, Test_acc:84.2%, Test_loss:0.466, Lr:7.54E-05
Epoch:31, Train_acc:87.8%, Train_loss:0.379, Test_acc:88.2%, Test_loss:0.421, Lr:7.39E-05
Epoch:32, Train_acc:88.6%, Train_loss:0.363, Test_acc:86.8%, Test_loss:0.439, Lr:7.39E-05
Epoch:33, Train_acc:89.4%, Train_loss:0.360, Test_acc:73.7%, Test_loss:0.439, Lr:7.24E-05
Epoch:34, Train_acc:88.6%, Train_loss:0.359, Test_acc:85.5%, Test_loss:0.462, Lr:7.24E-05
Epoch:35, Train_acc:90.2%, Train_loss:0.350, Test_acc:86.8%, Test_loss:0.464, Lr:7.09E-05
Epoch:36, Train_acc:87.8%, Train_loss:0.352, Test_acc:88.2%, Test_loss:0.465, Lr:7.09E-05
Epoch:37, Train_acc:91.2%, Train_loss:0.333, Test_acc:86.8%, Test_loss:0.464, Lr:6.95E-05
Epoch:38, Train_acc:88.4%, Train_loss:0.353, Test_acc:86.8%, Test_loss:0.442, Lr:6.95E-05
Epoch:39, Train_acc:91.4%, Train_loss:0.335, Test_acc:88.2%, Test_loss:0.466, Lr:6.81E-05
Epoch:40, Train_acc:89.4%, Train_loss:0.336, Test_acc:88.2%, Test_loss:0.448, Lr:6.81E-05
Epoch:41, Train_acc:90.0%, Train_loss:0.339, Test_acc:89.5%, Test_loss:0.457, Lr:6.68E-05
Epoch:42, Train_acc:91.8%, Train_loss:0.328, Test_acc:88.2%, Test_loss:0.453, Lr:6.68E-05
Epoch:43, Train_acc:91.4%, Train_loss:0.315, Test_acc:89.5%, Test_loss:0.490, Lr:6.54E-05
Epoch:44, Train_acc:90.6%, Train_loss:0.323, Test_acc:86.8%, Test_loss:0.437, Lr:6.54E-05
Epoch:45, Train_acc:92.0%, Train_loss:0.320, Test_acc:88.2%, Test_loss:0.444, Lr:6.41E-05
Epoch:46, Train_acc:93.0%, Train_loss:0.312, Test_acc:88.2%, Test_loss:0.455, Lr:6.41E-05
Epoch:47, Train_acc:92.6%, Train_loss:0.314, Test_acc:89.5%, Test_loss:0.472, Lr:6.28E-05
Epoch:48, Train_acc:92.0%, Train_loss:0.305, Test_acc:86.8%, Test_loss:0.412, Lr:6.28E-05
Epoch:49, Train_acc:92.8%, Train_loss:0.303, Test_acc:90.8%, Test_loss:0.429, Lr:6.16E-05
Epoch:50, Train_acc:92.4%, Train_loss:0.308, Test_acc:90.8%, Test_loss:0.419, Lr:6.16E-05
Done

4. Results visualization

import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")  # hide warning messages
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display minus signs correctly
plt.rcParams['figure.dpi'] = 100              # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
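
Since the per-epoch metrics live in plain Python lists, the best test result can be read off directly. A small sketch using the lists recorded during training:

# Report the best test accuracy and the epoch it occurred at
best_epoch = max(range(epochs), key=lambda i: test_acc[i])
print("Best test accuracy: {:.1f}% (epoch {})".format(test_acc[best_epoch] * 100, best_epoch + 1))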
