PyTorch cat and dog recognition example

Cat and dog recognition dataset: https://download.csdn.net/download/Victor_Li_/88483483?spm=1001.2014.3001.5501

Training set and test set image paths (the original directory screenshots are omitted).
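
Since the screenshots are unavailable, note that torchvision.datasets.ImageFolder infers one class per subdirectory, sorted alphabetically. The class subfolder names below are an assumption, inferred from class_name = ['cat', 'dog'] in the test script; a quick sanity check:

import torchvision

# ImageFolder assumes the layout
#   cats_and_dogs_train/cat/*.jpg
#   cats_and_dogs_train/dog/*.jpg
# and assigns labels from the sorted subfolder names
dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_train')
print(dataset.classes)       # expected: ['cat', 'dog']
print(dataset.class_to_idx)  # expected: {'cat': 0, 'dog': 1}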

The training code is as follows

import os
import torch
import torchvision
import matplotlib.pyplot as plt
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import time
from torch.optim.lr_scheduler import StepLR

if __name__ == '__main__':
    torch.autograd.set_detect_anomaly(True)  # debugging aid; slows training, remove once stable
    mp.freeze_support()
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU...')
    else:
        print('CUDA is available! Training on GPU...')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = 32
    # Data preprocessing and augmentation transforms
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)), # Adjust the image size to 224x224
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomRotation(45),
        torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        torchvision.transforms.ToTensor(), # Convert to tensor
        torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) # Normalize
    ])
    dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_train',
                                               transform=transform)

    val_ratio = 0.2
    val_size = int(len(dataset) * val_ratio)
    train_size = len(dataset) - val_size
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, num_workers=4, pin_memory=True)

    # x, y = next(iter(val_loader))
    # x = x.permute(1, 2, 0) # Move the channel dimension to the end
    # x = (x - x.min()) / (x.max() - x.min()) # Undo the normalization for display
    # plt.imshow(x) # Display the image
    # plt.axis('off') # Turn off the axes
    # plt.show()

    model = models.resnet34(weights=None)  # no ImageNet-pretrained weights; weights are restored from the checkpoint below

    num_classes = 2
    model.fc = nn.Sequential(
        nn.Dropout(p=0.2),
        # nn.BatchNorm1d(model.fc.in_features),
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid(),  # paired with BCELoss on one-hot targets below
    )
    lambda_L1 = 0.001
    lambda_L2 = 0.0001
    for name, param in model.named_parameters():
        param.requires_grad = True

    def regularization_penalty(model):
        # Recompute the L1/L2 penalties over the current non-bias weights every step,
        # keeping them attached to the autograd graph so they contribute gradients
        reg_l1 = 0.0
        reg_l2 = 0.0
        for name, param in model.named_parameters():
            if 'bias' not in name:
                reg_l1 = reg_l1 + torch.norm(param, p=1)
                reg_l2 = reg_l2 + torch.norm(param, p=2)
        return reg_l1, reg_l2

    optimizer = optim.Adam(model.parameters(), lr=0.01)
    scheduler = StepLR(optimizer, step_size=5, gamma=0.9)
    criterion = nn.BCELoss().to(device)

    model.to(device)
    # print(model)
    loadfilename = "recognize_cats_and_dogs.pt"
    savefilename = "recognize_cats_and_dogs3.pt"

    # Resume from a previous checkpoint if one exists
    if os.path.exists(loadfilename):
        checkpoint = torch.load(loadfilename, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])


    def save_checkpoint(epoch, model, optimizer, filename, train_loss=0., val_loss=0.):
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'val_loss': val_loss,
        }
        torch.save(checkpoint, filename)


    num_epochs = 100
    train_loss = []
    for epoch in range(num_epochs):
        running_loss = 0
        correct = 0
        total = 0
        epoch_start_time = time.time()
        for i, (inputs, labels) in enumerate(train_loader):
            # Move the batch to the device
            inputs, labels = inputs.to(device), labels.to(device)
            # Forward pass
            outputs = model(inputs)
            one_hot = nn.functional.one_hot(labels, num_classes).float()
            # Compute the loss with the L1/L2 penalties, then backpropagate
            reg_l1, reg_l2 = regularization_penalty(model)
            loss = criterion(outputs, one_hot) + lambda_L1 * reg_l1 + lambda_L2 * reg_l2
            loss.backward()
            # Accumulate gradients over two batches (effective batch size 64)
            if ((i + 1) % 2 == 0) or (i + 1 == len(train_loader)):
                # Update model parameters
                optimizer.step()
                optimizer.zero_grad()

            # Record loss and accuracy
            running_loss += loss.item()
            train_loss.append(loss.item())
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
        accuracy_train = 100 * correct / total
        # Evaluate on the validation set
        model.eval()  # disable dropout during evaluation
        with torch.no_grad():
            running_loss_val = 0
            correct_val = 0
            total_val = 0
            for inputs, labels in val_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                one_hot = nn.functional.one_hot(labels, num_classes).float()
                loss = criterion(outputs, one_hot)
                running_loss_val += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                correct_val += (predicted == labels).sum().item()
                total_val += labels.size(0)
            accuracy_val = 100 * correct_val / total_val
        model.train()  # restore training mode for the next epoch
        # Output the loss and accuracy of each epoch
        epoch_end_time = time.time()
        epoch_time = epoch_end_time - epoch_start_time
        epoch_train_loss = running_loss / len(train_loader)
        epoch_val_loss = running_loss_val / len(val_loader)
        print(
            "Epoch [{}/{}], Time: {:.4f}s, Train Loss: {:.4f}, Train Accuracy: {:.2f}%, Val Loss: {:.4f}, Val Accuracy: {:.2f}%"
            .format(epoch + 1, num_epochs, epoch_time, epoch_train_loss,
                    accuracy_train, epoch_val_loss, accuracy_val))
        save_checkpoint(epoch, model, optimizer, savefilename, epoch_train_loss, epoch_val_loss)
        scheduler.step()

    # plt.plot(train_loss, label='Train Loss')
    # # Add legend and labels
    # plt.legend()
    # plt.xlabel('Epochs')
    # plt.ylabel('Loss')
    # plt.title('Training Loss')
    #
    # # Display graphics
    # plt.show()

The test code is as follows

import torch
import torchvision
import torch.nn as nn
import torchvision.models as models
import matplotlib.pyplot as plt
import torch.multiprocessing as mp

if __name__ == '__main__':
    mp.freeze_support()
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU...')
    else:
        print('CUDA is available! Training on GPU...')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = 32
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224,224)), # Adjust the image size to 224x224
        torchvision.transforms.ToTensor(), # Convert to tensor
        torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) # Normalize
    ])
    dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_test',
                                               transform=transform)

    test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)

    model = models.resnet34(weights=None)

    num_classes = 2
    for param in model.parameters():
        param.requires_grad = False  # inference only, no gradients needed

    # Rebuild the same classification head used during training so the checkpoint loads cleanly
    model.fc = nn.Sequential(
        nn.Dropout(p=0.2),
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid(),
    )
    model.to(device)
    # print(model)

    filename = "recognize_cats_and_dogs.pt"
    checkpoint = torch.load(filename, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # disable dropout for inference

    class_name = ['cat', 'dog']
    # Visualize predictions on the test set
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            output = model(inputs)
            _, predicted = torch.max(output.data, 1)
            for x, y, z in zip(inputs, labels, predicted):
                x = (x - x.min()) / (x.max() - x.min())  # undo the normalization for display
                plt.imshow(x.cpu().permute(1, 2, 0))
                plt.axis('off')
                plt.title('predicted: {0}'.format(class_name[z]))
                plt.show()
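
The loop above only displays each image with its predicted label. To also report an aggregate accuracy over the test set, the counting pattern from the training script can be reused; a minimal sketch:

    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
    print('Test accuracy: {:.2f}%'.format(100 * correct / total))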

Some test results are as follows