Example 3: Implementing the convolutional neural network ResNet18 with TensorFlow 2 (complete code + comments)

Basic idea: as the network gets deeper, gradients have difficulty propagating back to the earlier layers, so a shortcut (skip connection) is added.
The basic block contains 2 convolutional layers and 1 shortcut connection (as shown below).
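In equation form, a residual block computes y = relu(F(x) + x), where F(x) is the output of the two convolution layers; when the spatial size or channel count changes, the identity x is replaced by a 1x1 convolution on the shortcut so the two branches can still be added. Because the shortcut passes the signal (and its gradient) through unchanged, earlier layers keep receiving a usable gradient even in very deep networks.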

BasicBlock diagram (figure)

ResBlock diagram (figure)

ResNet18 diagram (figure)

Code that defines ResNet18 (the training script below imports it as resnet.py):

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential



class BasicBlock(layers.Layer):
    # Residual module
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()
        #The first convolution unit
        self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same')
        # 'same' padding pads according to the stride; with stride 1, output size = input size
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')
        # The second convolution unit
        self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()

        if stride != 1:  # shapes differ, so match them with a 1x1 convolution on the shortcut
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
        else:  # shapes already match, use the identity shortcut directly
            self.downsample = lambda x: x

    def call(self, inputs, training=None):

        # [b, h, w, c], through the first convolution unit
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)
        # Pass the second convolution unit
        out = self.conv2(out)
        out = self.bn2(out)
        # Through the identity module
        identity = self.downsample(inputs)  # note: downsample takes the original inputs, so the shortcut ends up with the same shape as out
        # The 2 path outputs are added directly
        output = layers.add([out, identity])
        output = tf.nn.relu(output)  # use the functional relu here rather than a layer, to avoid tracking an extra layer object

        return output


class ResNet(keras.Model):
    # General ResNet implementation class
    def __init__(self, layer_dims, num_classes=10):  # layer_dims, e.g. [2, 2, 2, 2]: number of BasicBlocks in each of the 4 ResBlocks
        super(ResNet, self).__init__()
        # Root network, preprocessing
        self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
                                layers.BatchNormalization(),
                                layers.Activation('relu'),
                                layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
                                ])
        # Stack 4 ResBlocks; each contains several BasicBlocks and may use a different stride
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)  # stride 2 halves the spatial size while the channel count grows
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)  # the exact output spatial size depends on the input size

        # Reduce height and width via global average pooling: [b, h, w, 512] => [b, 512]
        self.avgpool = layers.GlobalAveragePooling2D()
        # finally a fully connected layer for classification
        self.fc = layers.Dense(num_classes)

    def call(self, inputs, training=None):
        # pass through the root (stem) network
        x = self.stem(inputs)
        # then pass through the 4 ResBlocks in turn
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # through pooling layer
        x = self.avgpool(x)
        # Through the fully connected layer
        x = self.fc(x)

        return x



    def build_resblock(self, filter_num, blocks, stride=1):
        # helper: stack `blocks` BasicBlocks, each with `filter_num` filters
        res_blocks = Sequential()
        # only the first BasicBlock may have a stride other than 1, to perform downsampling
        res_blocks.add(BasicBlock(filter_num, stride))

        for _ in range(1, blocks):  # the remaining BasicBlocks all use stride 1
            res_blocks.add(BasicBlock(filter_num, stride=1))

        return res_blocks


def resnet18():
    # Implement different ResNets by adjusting the number and configuration of BasicBlocks inside the module
    return ResNet([2, 2, 2, 2])


def resnet34():
    # Implement different ResNets by adjusting the number and configuration of BasicBlocks inside the module
    return ResNet([3, 4, 6, 3])
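If the model code above is saved as resnet.py (the training script below imports it under that name), a quick sanity check like the following (a minimal sketch, not from the original tutorial code) confirms that a batch of CIFAR-10-sized images produces one logit per class; with 32x32 inputs the feature map shrinks to roughly 4x4x512 before global average pooling.

import tensorflow as tf
from resnet import resnet18

model = resnet18()
model.build(input_shape=(None, 32, 32, 3))
x = tf.random.normal([4, 32, 32, 3])   # a dummy batch of 4 images
out = model(x)
print(out.shape)                        # expected: (4, 10), one logit per CIFAR-10 class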

Training code that uses ResNet18 on CIFAR-10:

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os
from resnet import resnet18

os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
tf.random.set_seed(2345)





def preprocess(x, y):
    # map pixel values to the range [-1, 1]
    x = 2*tf.cast(x, dtype=tf.float32) / 255. - 1
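    # e.g. a pixel value of 0 maps to 2*0/255 - 1 = -1.0, and 255 maps to 2*255/255 - 1 = 1.0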
    y = tf.cast(y, dtype=tf.int32) #Type conversion
    return x,y


(x,y), (x_test, y_test) = datasets.cifar10.load_data() # Load the dataset
y = tf.squeeze(y, axis=1) # remove the extra dimension: [b, 1] => [b]
y_test = tf.squeeze(y_test, axis=1) # remove the extra dimension: [b, 1] => [b]
print(x.shape, y.shape, x_test.shape, y_test.shape)


train_db = tf.data.Dataset.from_tensor_slices((x,y)) # Build a training set
# randomly shuffle, preprocess, and batch
train_db = train_db.shuffle(1000).map(preprocess).batch(512)

test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test)) # Build the test set
# preprocess and batch (the test set does not need shuffling)
test_db = test_db.map(preprocess).batch(512)
# Take a sample
sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))
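# expected: sample: (512, 32, 32, 3) (512,), with values roughly spanning [-1.0, 1.0]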


def main():

    # [b, 32, 32, 3] => [b, 1, 1, 512]
    model = resnet18() # ResNet18 network
    model.build(input_shape=(None, 32, 32, 3))
    model.summary() # print a summary of the network parameters
    optimizer = optimizers.Adam(learning_rate=1e-4) # build the optimizer

    for epoch in range(100): # Training epoch

        for step, (x,y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 10], forward propagation
                logits = model(x)
                # [b] => [b, 10], one-hot encoding
                y_onehot = tf.one_hot(y, depth=10)
                # calculate the cross-entropy loss; from_logits=True because fc outputs raw logits (no softmax in the model)
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            # Calculate gradient information
            grads = tape.gradient(loss, model.trainable_variables)
            # Update network parameters
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 50 == 0:  # print the loss every 50 steps
                print(epoch, step, 'loss:', float(loss))



        total_num = 0
        total_correct=0
        for x,y in test_db:

            logits = model(x)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)



if __name__ == '__main__':
    main()

Source of code and schematic:

Lesson 104 ResNet in Practice - 1 (Bilibili)

Traffic Sign Classification | HackerNoon
