Deploy yolov5 web service based on flask (1)

This article builds on the official yolov5 repository and deploys model inference as a web service through the Flask framework. The client uploads an image to be detected, and the server returns the processed result (either the string information output by the model, or the image with detection boxes drawn on it after post-processing). Only the overall pipeline is covered here; there are still many areas that could be optimized.
Code:

# This code as a whole is adapted from detect.py in the yolov5 source code
import io
import json
import numpy as np
from torchvision import models
import torchvision.transforms as transforms
from PIL import Image
#Import flask related
from flask import Flask, jsonify, request

# Imports from detect.py in yolov5
import argparse
import os
import shutil
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
    check_img_size, non_max_suppression, apply_classifier, scale_coords,
    xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized

# Images are usually transmitted over the network as base64 strings: the client encodes the image into a string,
# and the server decodes the string back into an image and feeds it to the yolov5 model for inference.
import base64

# The inference parameters are parsed in __main__ at the bottom of the file.

# Create the Flask application (boilerplate, nothing yolov5-specific)
app = Flask(__name__)

#The following defines the encoding and decoding functions of base64
#Image encoding function
def image_to_base64(full_path):
    with open(full_path, "rb") as f:
        data = f.read()
        image_base64_enc = base64.b64encode(data)
        image_base64_enc = str(image_base64_enc, 'utf-8')
    return image_base64_enc

# Image decoding function; for reference see: https://blog.csdn.net/ctwy291314/article/details/91493156
def base64_to_image(base64_code):
    # base64 decoding
    img_data = base64.b64decode(base64_code)
    # Convert to an np array (np.fromstring is deprecated in favor of np.frombuffer)
    img_array = np.frombuffer(img_data, np.uint8)
    # Decode into an OpenCV BGR image (cv2.COLOR_RGB2BGR is a color-conversion
    # code, not a valid read flag; cv2.IMREAD_COLOR is the correct flag here)
    image_base64_dec = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    return image_base64_dec
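
# A quick sanity check for the two helpers above (a debugging aid, not part
# of the request flow; "test.jpg" is a hypothetical local image):
def check_base64_roundtrip(path="test.jpg"):
    enc = image_to_base64(path)
    img = base64_to_image(enc)
    assert img is not None and img.ndim == 3  # decoded back to an H x W x 3 array
    return img.shape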

# yolov5 inference (detection) function, essentially the same as in the yolov5 source code
def detect(source, save_img=True):
    #Various parameters passed
    out, weights, view_img, save_txt, imgsz = \
        opt.output, opt.weights, opt.view_img, opt.save_txt, opt.img_size

    #Initialize
    set_logging()
    device = select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out) # delete output folder
    os.makedirs(out) # make new output folder
    half = device.type != 'cpu' # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device) # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
    if half:
        model.half() # to FP16

    # Second-stage classifier
    classify=False
    if classify:
        modelc = load_classifier(name='resnet101', n=2) # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float() # uint8 to fp16/32
        img /= 255.0 # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        #Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        #Process detections
        for i, det in enumerate(pred): # detections per image
            # The webcam branch from the original detect.py is dropped here;
            # the source is always a single image file
            p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:] # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
            if det is not None and len(det):
                print("line129")
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum() # detections per class
                    s += '%g %ss, ' % (n, names[int(c)]) # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt: # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format

                    if save_img or view_img: # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

    # im0 is the result image with the detection boxes drawn on it. It must be
    # saved here, since predict() below reads "./serve_res.png" to build the response.
    cv2.imwrite("./serve_res.png", im0)
    return im0
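
# Note: as in the original detect.py, the model weights are reloaded on every
# call to detect(). For a real service the model should be loaded once at
# startup and reused across requests; this is one of the optimizations the
# introduction alludes to, left as-is here to stay close to the source.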

# Declare the route of the web service. When calling the server, the client cannot specify only the IP address; it must also append this bound route (see the test client below)
@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        file = request.files['file']
        img_bytes = file.read()
        # Save the uploaded image so detect() can read it from a path.
        # PIL decodes to RGB; convert to BGR before writing with OpenCV.
        client_img = cv2.cvtColor(np.array(Image.open(io.BytesIO(img_bytes)).convert('RGB')), cv2.COLOR_RGB2BGR)
        cv2.imwrite("client-img.png", client_img)
        # Call the detection function
        res_image = detect("client-img.png")
        # The base64 encoding of the result image is put into the jsonify
        # response; the client decodes it to recover the result image.
        json_res = image_to_base64("./serve_res.png")
        return jsonify({'detect_res': json_res})
    


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    #source is not required, it is passed in directly from the client.
    #parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
    parser.add_argument('--output', type=str, default='inference/output', help='output folder') # output folder
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()
    # The source image is uploaded by the client, so no --source argument is needed
    # Change host to the IP address of the machine running the service
    app.run(debug=True, host='192.168.13.134', port=5001)
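
For a quick smoke test of the service (the full client is covered in the next article), you can post an image with Python's requests library and decode the returned base64 string. This is a minimal sketch: the route, field name, and response key match the server code above, while the host, port, "test.jpg", and "detect_res.png" are placeholders to adapt to your setup.

import base64

import cv2
import numpy as np
import requests

# Upload a local test image to the /predict route of the server above
with open("test.jpg", "rb") as f:
    resp = requests.post("http://192.168.13.134:5001/predict", files={"file": f})

# The server returns {"detect_res": <base64 string of the result image>}
img_data = base64.b64decode(resp.json()["detect_res"])
img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
cv2.imwrite("detect_res.png", img)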

Later, we will introduce how the client requests the server and displays the detection image results.