Python – an upgraded version of the KOMUYU wooden-fish clicker (a "really knock it by hand" version controlled by hand gestures)

Demo effect

Third-party libraries that need to be installed:
pip install pygame # Load music
pip install pillow # Load pictures
pip install mediapipe # Model to determine gestures
pip install opencv-python # OpenCV captures and processes the camera frames

It is recommended that those with independent graphics and cameras try it!

I’m thinking of upgrading my gameplay. Only by knocking hard can you really accumulate merit! So I found a model that can judge gestures.

Source code (press Q to exit after turning on the camera)

import threading
import time
import tkinter
from tkinter import messagebox

import pygame  # pip install pygame
from PIL import Image, ImageTk  # pip install pillow
 
class window(tkinter.Tk):
    """Main window of the 'knock the wooden fish' merit clicker.

    Clicking the wooden-fish button — or lowering a hand in front of the
    webcam while interaction mode is active — plays a knock sound,
    flashes the button image, and increments the merit counter.
    """

    def __init__(self):
        super().__init__()

        # Running merit counter shown in the label.
        self.gongde = 0

        # Prepare audio: pygame's mixer plays the knock sound.
        self.pygame = pygame
        self.pygame.mixer.init()
        self.pygame.mixer.music.load('knock.mp3')

        # Prepare images, converted so tkinter widgets can display them.
        self.qiaomuyutupian = ImageTk.PhotoImage(file='KOMUYUTUPIAN.jpg')
        self.qiaomuyutupian2 = ImageTk.PhotoImage(file='KOMUYUTUPIAN2.jpg')

        # Build the widgets.
        self.base_top()

    def base_top(self):
        """Create and lay out all widgets of the main window."""
        self.title('Knocking on wooden fish adds merit')
        self.geometry('410x400')
        self.configure(bg='black')

        # Merit counter label.
        self.label1 = tkinter.Label(
            self, text='Accumulate merit:' + str(self.gongde),
            font=('华文新伟', 15), fg='white', bg='black', width=18)
        self.label1.place(x=100, y=70)

        # Wooden-fish button: clicking it knocks once.
        self.button1 = tkinter.Button(
            self, image=self.qiaomuyutupian, relief='ridge',
            command=self.qiaomuyu)
        self.button1.place(x=100, y=100)

        # Button that starts camera-based gesture interaction.
        self.button2 = tkinter.Button(
            self, text='interaction', width=10, command=self.thread_hudong)
        self.button2.place(x=160, y=315)

        # Text area used for the floating "merit + 1" animation.
        self.text1 = tkinter.Text(
            self, width=10, height=5, bg='black', bd=0, foreground='white')
        self.text1.place(x=125, y=115)

    def showplus(self):
        """Animate a floating 'merit + 1' message and bump the counter."""
        # Push the message to the bottom of the text area with blank
        # lines, then show it.  (The original source had a mangled
        # backslash-continued string literal here; '\n' is the intent.)
        self.text1.insert('insert', '\n' * 4)
        self.text1.insert('insert', ' merit + 1')
        # Make the message float upward by repeatedly deleting the
        # top line of the text area.
        for _ in range(5):
            time.sleep(0.04)
            self.text1.delete(1.0, 2.0)

        # Merit + 1.
        self.gongde += 1
        self.label1.config(text='Accumulate merit:' + str(self.gongde))

    def changetupian(self):
        """Briefly swap in the 'knocked' image, then restore the idle one."""
        self.button1.config(image=self.qiaomuyutupian2)
        time.sleep(0.1)
        self.button1.config(image=self.qiaomuyutupian)

    def qiaomuyu(self):
        """Perform one knock: sound + floating text + image flash.

        Each effect runs on its own thread so the UI never blocks on
        the sleeps inside the animations.
        """
        threading.Thread(target=self.pygame.mixer.music.play).start()
        threading.Thread(target=self.showplus).start()
        threading.Thread(target=self.changetupian).start()

    def thread_hudong(self):
        """Start camera interaction on a worker thread and show a notice."""
        threading.Thread(target=self.hudong).start()

        self.frame = tkinter.Frame(self, width=200, height=40, bg='white')
        self.frame.place(x=103, y=350)

        self.label2 = tkinter.Label(
            self.frame, text='In the camera, please wait...', bg='white')
        self.label2.place(x=33, y=10)

    def hudong(self):
        """Gesture loop: knock when a hand moves from top to bottom.

        Uses MediaPipe hand landmarks from the webcam.  Any landmark
        above y=200 (pixels) arms the gesture; a later landmark below
        y=400 triggers one knock and disarms it.  Press 'q' in the
        preview window to quit.
        """
        # Imported lazily so the plain clicker works even when these
        # heavy optional dependencies are not installed.
        import cv2
        import mediapipe as mp

        mp_hands = mp.solutions.hands
        hands = mp_hands.Hands()
        mp_drawing = mp.solutions.drawing_utils

        cap = cv2.VideoCapture(0)  # Open the default camera.
        mark_one = 0  # Becomes 1 once a hand has been seen in the upper zone.

        # Remove the "please wait" notice once the camera loop starts.
        # (The original destroyed it on every iteration; once suffices.)
        self.frame.destroy()

        while True:
            ret, frame = cap.read()  # Read one video frame.
            if not ret:
                break
            # MediaPipe expects RGB; OpenCV delivers BGR.
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = hands.process(image)  # Gesture recognition.

            # Process recognition results.
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    # Draw the landmark skeleton on the preview frame.
                    mp_drawing.draw_landmarks(
                        frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)

                    for point in hand_landmarks.landmark:
                        # Landmarks are normalized [0,1]; scale to pixels.
                        x = int(point.x * frame.shape[1])
                        y = int(point.y * frame.shape[0])
                        if y < 200:
                            mark_one = 1  # Hand raised: arm the gesture.
                        if y > 400 and mark_one == 1:
                            self.qiaomuyu()  # Hand lowered: knock once.
                            mark_one = 0

                        cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)  # Key point.

            cv2.imshow('Gesture Recognition', frame)  # Show the preview.
            # Fixed: the original had an HTML-escaped '& amp;' here,
            # which is a SyntaxError.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
 
# Script entry point: build the main window and hand control to tkinter.
if __name__ == '__main__':
    app = window()
    app.mainloop()

The knowledge points of the article match the official knowledge files, and you can further learn relevant knowledge. Python entry skill treeHomepageOverview 386592 people are learning the system