Softmax Multi-Class Classification Notes - Bilibili, Liu Er's "PyTorch Deep Learning Practice"

Table of Contents

1. Softmax formula

2. MNIST dataset

3. Steps

(1) Prepare the dataset

(2) Construct the model

(3) Choose the appropriate loss function and optimizer

(4) Train and test

4. Experimental results

5. Homework


1. Softmax formula

Given the network's raw outputs (logits) $z_0, \dots, z_{K-1}$, Softmax turns them into a probability distribution over the $K$ classes:

$$P(y = i) = \frac{e^{z_i}}{\sum_{j=0}^{K-1} e^{z_j}}, \quad i = 0, \dots, K-1$$

Example: for logits $z = (0.2, 0.1, -0.1)$, Softmax gives roughly $(0.38, 0.34, 0.28)$.

CrossEntropyLoss already includes Softmax: it applies Softmax to $\hat{y}$ and then computes the loss.

CrossEntropyLoss == LogSoftmax + NLLLoss
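
This identity is easy to check in PyTorch; a minimal sketch (the logits and target below are illustrative):

import torch

# Check that CrossEntropyLoss equals LogSoftmax followed by NLLLoss.
logits = torch.tensor([[0.2, 0.1, -0.1]])  # y_hat: raw outputs, no activation applied
target = torch.tensor([0])                 # index of the correct class

ce = torch.nn.CrossEntropyLoss()(logits, target)
nll = torch.nn.NLLLoss()(torch.nn.LogSoftmax(dim=1)(logits), target)
print(ce.item(), nll.item())  # both print the same value (about 0.9729)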

2. MNIST dataset

Each MNIST image is a 28x28 grayscale picture. Scaling the pixel values from 0-255 down to 0-1 shows that the original image can be represented by a 28x28 matrix of numbers.
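
To see this concretely, one can inspect a single sample; a small sketch, assuming torchvision is available and reusing the dataset path from the code below:

from torchvision import datasets, transforms

mnist = datasets.MNIST(root='../dataset/mnist', train=True, download=True,
                       transform=transforms.ToTensor())
image, label = mnist[0]                        # first training sample
print(image.shape)                             # torch.Size([1, 28, 28])
print(image.min().item(), image.max().item())  # pixel values scaled into [0, 1]
print(label)                                   # the digit this image shows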

3. Steps
(1) Prepare the dataset

import torch
from torchvision import transforms          # image preprocessing
from torchvision import datasets            # standard datasets, including MNIST
from torch.utils.data import DataLoader     # mini-batch loader
import torch.nn.functional as F             # functional ops such as relu
import torch.optim as optim                 # optimizers

batch_size = 64
# ToTensor converts the PIL Image to a tensor with values in [0, 1];
# Normalize((mean,), (std,)) standardizes with the empirical mean and
# standard deviation computed over the MNIST training samples.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST(root='../dataset/mnist', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)

test_dataset = datasets.MNIST(root='../dataset/mnist', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)  # no need to shuffle the test set
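
A quick, illustrative way to confirm what the loader yields: each batch is an image tensor of shape [64, 1, 28, 28] plus a label tensor of shape [64].

images, labels = next(iter(train_loader))  # grab one mini-batch
print(images.shape)  # torch.Size([64, 1, 28, 28])
print(labels.shape)  # torch.Size([64])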
(2) Construct the model

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        x = x.view(-1, 784)  # flatten each 1x28x28 image into a 784-dim vector
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)  # no activation on the last layer: CrossEntropyLoss applies Softmax itself

model = Net()
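
As an illustrative sanity check, a fake batch confirms that the model outputs one logit per digit class:

dummy = torch.randn(4, 1, 28, 28)  # a fake batch of four 1x28x28 "images"
print(model(dummy).shape)          # torch.Size([4, 10])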
(3) Choose the appropriate loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
(4) Train and test
# Encapsulate one training epoch in a function
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()
        # forward, backward, update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:  # print the average loss every 300 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    # torch.no_grad() is a context manager that disables gradient tracking inside its block
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)  # index of the largest logit = predicted class
            total += labels.size(0)  # size of the first dimension = number of samples in this batch
            correct += (predicted == labels).sum().item()
    print('Accuracy: %d %%' % (100 * correct / total))
if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
4. Experimental results

Over the 10 training epochs, the loss keeps decreasing and the test accuracy keeps increasing.

5. Homework

The homework is a 9-class classification task on tabular data with 93 features. The data comes as train.csv and test.csv, where the first column is an id and the last column of train.csv is the target label.
import torch
from torch.utils.data import DataLoader, Dataset  # Dataset base class and mini-batch loader
import torch.nn.functional as F                   # functional ops such as relu
import numpy as np
import pandas as pd


def labels_id(labels):
    # Map each string label to its integer index in target_labels
    target_id = []
    target_labels = ['Class_1', 'Class_2', 'Class_3', 'Class_4', 'Class_5',
                     'Class_6', 'Class_7', 'Class_8', 'Class_9']  # the nine class names
    for label in labels:
        target_id.append(target_labels.index(label))
    return target_id
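
For example, labels_id(['Class_2', 'Class_9']) returns [1, 8].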


class DiabetesDataset(Dataset):  # inherits from Dataset
    def __init__(self, filepath):  # constructor: load and preprocess the CSV
        data = pd.read_csv(filepath)
        labels = data['target']
        self.len = data.shape[0]  # number of rows = number of samples

        # Features: columns 1:-1 (left-closed, right-open) drop the id column and the
        # target column; pandas consumes the header row itself, so no row needs deleting.
        self.x_data = torch.tensor(np.array(data)[:, 1:-1].astype(float))
        self.y_data = labels_id(labels)

    def __getitem__(self, index):  # support indexing: dataset[index]
        return self.x_data[index], self.y_data[index]

    def __len__(self):  # number of samples in the dataset
        return self.len


dataset1 = DiabetesDataset('train.csv')
# num_workers is the number of subprocesses used for data loading; values > 0 can speed loading up
train_loader = DataLoader(dataset=dataset1, batch_size=128, shuffle=True, num_workers=0)


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(93, 64)  # 93 input features
        self.l2 = torch.nn.Linear(64, 32)
        self.l3 = torch.nn.Linear(32, 16)
        self.l4 = torch.nn.Linear(16, 9)   # 9 output classes

    def forward(self, x):
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        return self.l4(x)  # no activation on the last layer: CrossEntropyLoss applies Softmax itself

    def predict(self, x):  # prediction helper
        with torch.no_grad():  # no gradients are needed for inference
            x = self.forward(x)  # reuse forward: raw logits, no ReLU on the last layer
            # The index of the largest logit is the predicted category.
            _, predicted = torch.max(x, dim=1)
            # Convert the predicted classes to a one-hot representation for the prediction file.
            # Note: get_dummies only creates columns for classes that actually occur in predicted.
            y = pd.get_dummies(predicted.numpy()).astype(int)
            return y


model = Net()

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01,momentum=0.5)

# Encapsulate one training epoch in a function
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs = inputs.float()  # features were loaded as float64; the model expects float32
        optimizer.zero_grad()
        # forward, backward, update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:  # print the average loss every 300 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


dataset2 = pd.read_csv('test.csv')
# iloc[:, 1:] drops the id column; pandas consumes the header row automatically
test_set = torch.from_numpy(dataset2.iloc[:, 1:].values)

def predict_save():
    out = model.predict(test_set.float())  # call the prediction helper; cast inputs to float32 first

    # The nine class names, used as column labels
    labels = ['Class_1', 'Class_2', 'Class_3', 'Class_4', 'Class_5',
              'Class_6', 'Class_7', 'Class_8', 'Class_9']
    out.columns = labels

    # Insert the id column from the test file
    out.insert(0, 'id', dataset2['id'])
    output = pd.DataFrame(out)
    output.to_csv('my_predict.csv', index=False)
    return output

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
    predict_save()
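
With this setup, my_predict.csv has an id column followed by the nine one-hot Class_* columns. A row predicted as Class_2 would look like this (the id value is illustrative):

id,Class_1,Class_2,Class_3,Class_4,Class_5,Class_6,Class_7,Class_8,Class_9
1,0,1,0,0,0,0,0,0,0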

References:

little red519, "Liu Er's 'PyTorch Deep Learning' Lecture 9 Assignment", CSDN blog.

Xiaobai*advanced ing, "Liu Er's 'PyTorch Deep Learning Practice' Notes P9: Multi-Classification Problem", CSDN blog.