commit 58f5be8dd3c0d76632943014c31fe39688e96fee
Author: jimmy
Date:   Thu May 25 11:50:00 2023 +1200

    Add Python code

diff --git a/camera.py b/camera.py
new file mode 100644
index 0000000..bfee9a4
--- /dev/null
+++ b/camera.py
@@ -0,0 +1,35 @@
+import cv2
+import pickle
+import socket
+import struct
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 9501
+video_file = 'some_file.MP4'
+
+sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # listening TCP socket
+sock.bind((TCP_IP, TCP_PORT))
+sock.listen(5)
+
+while True:
+    (client_socket, client_address) = sock.accept()  # wait for a client
+    print('connection established with ' + str(client_address))
+    cap = cv2.VideoCapture(video_file)
+    pos_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
+    while True:
+        flag, frame = cap.read()
+        if flag:
+            frame = pickle.dumps(frame)   # serialise the raw frame
+            size = len(frame)
+            p = struct.pack('I', size)    # 4-byte length prefix
+            frame = p + frame
+            client_socket.sendall(frame)
+        else:
+            cap.set(cv2.CAP_PROP_POS_FRAMES, pos_frame - 1)  # retry the frame that failed to read
+
+        if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(cv2.CAP_PROP_FRAME_COUNT):
+            size = 10
+            p = struct.pack("I", size)    # end-of-stream sentinel
+            client_socket.send(p)
+            client_socket.send(b'')
+            break
\ No newline at end of file
diff --git a/client.py b/client.py
new file mode 100644
index 0000000..64a88b1
--- /dev/null
+++ b/client.py
@@ -0,0 +1,34 @@
+import cv2
+import io
+import socket
+import struct
+import time
+import pickle
+import zlib
+
+client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+client_socket.connect(('127.0.0.1', 8485))
+connection = client_socket.makefile('wb')
+
+cam = cv2.VideoCapture(0)
+
+cam.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
+cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
+
+img_counter = 0
+
+encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
+
+while True:
+    ret, frame = cam.read()
+    result, frame = cv2.imencode('.jpg', frame, encode_param)
+    # data = zlib.compress(pickle.dumps(frame, 0))
+    data = pickle.dumps(frame, 0)
+    size = len(data)
+
+
+    print("{}: {}".format(img_counter, size))
+    client_socket.sendall(struct.pack(">L", size) + data)  # big-endian length prefix + pickled JPEG
+    img_counter += 1
+
+cam.release()
\ No newline at end of file
diff --git a/hands.py b/hands.py
new file mode 100644
index 0000000..e69de29
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..c1bd6b6
--- /dev/null
+++ b/main.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+import math
+import cv2
+import mediapipe as mp
+mp_drawing = mp.solutions.drawing_utils
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_hands = mp.solutions.hands
+
+# For webcam input:
+# cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
+# cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
+cap = cv2.VideoCapture(0)
+count = 0
+with mp_hands.Hands(
+        model_complexity=0,
+        min_detection_confidence=0.5,
+        min_tracking_confidence=0.5,
+        max_num_hands=20) as hands:
+    framecount = 0
+    previous_x = 0
+    previous_y = 0
+    timeout = 0
+    while cap.isOpened():
+        success, image = cap.read()
+        if not success:
+            print("Ignoring empty camera frame.")
+            # If loading a video, use 'break' instead of 'continue'.
+            continue
+
+        # To improve performance, optionally mark the image as not writeable to
+        # pass by reference.
+        image.flags.writeable = False
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        results = hands.process(image)
+
+        # Draw the hand annotations on the image.
+        image.flags.writeable = True
+        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+
+
+        if results.multi_hand_landmarks:
+            for hand_landmarks in results.multi_hand_landmarks:
+                x = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x
+                y = hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y
+
+
+                if (framecount % 5) == 0:
+
+                    # print(f'{abs(previous_x - x)} {abs(previous_y - y)}')
+                    previous_x = x
+                    previous_y = y
+
+                if abs(previous_x - x) < 0.01 and abs(previous_y - y) < 0.01 and timeout <= 0:
+                    print("Still")
+                    cv2.circle(image, (int(x * image.shape[1]), int(y * image.shape[0])), 63, (0, 0, 255), -1)
+                    print(x, y)
+                    timeout = 5
+                timeout -= 1
+                framecount += 1
+
+
+                mp_drawing.draw_landmarks(
+                    image,
+                    hand_landmarks,
+                    mp_hands.HAND_CONNECTIONS,
+                    mp_drawing_styles.get_default_hand_landmarks_style(),
+                    mp_drawing_styles.get_default_hand_connections_style())
+        # Flip the image horizontally for a selfie-view display.
+        cv2.imshow('window', cv2.flip(image, 1))
+        if cv2.waitKey(5) & 0xFF == ord('q'):
+            break
+cap.release()
+
+
diff --git a/server.py b/server.py
new file mode 100644
index 0000000..e9124be
--- /dev/null
+++ b/server.py
@@ -0,0 +1,43 @@
+import socket
+import sys
+import cv2
+import pickle
+import numpy as np
+import struct
+import zlib
+
+HOST = '127.0.0.1'
+PORT = 8485
+
+s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+print('Socket created')
+
+s.bind((HOST, PORT))
+print('Socket bind complete')
+s.listen(10)
+print('Socket now listening')
+
+conn, addr = s.accept()
+
+data = b""
+payload_size = struct.calcsize(">L")
+print("payload_size: {}".format(payload_size))
+while True:
+    while len(data) < payload_size:
+        print("Recv: {}".format(len(data)))
+        data += conn.recv(4096)
+
+    print("Done Recv: {}".format(len(data)))
+    packed_msg_size = data[:payload_size]
+    data = data[payload_size:]
+    msg_size = struct.unpack(">L", packed_msg_size)[0]
+    print("msg_size: {}".format(msg_size))
+    while len(data) < msg_size:
+        data += conn.recv(4096)
+    frame_data = data[:msg_size]
+    data = data[msg_size:]
+
+    frame = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
+    frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
+    cv2.imshow('ImageWindow', frame)
+    cv2.waitKey(1)
diff --git a/udpserver.py b/udpserver.py
new file mode 100644
index 0000000..3abca54
--- /dev/null
+++ b/udpserver.py
@@ -0,0 +1,11 @@
+import socket
+
+UDP_IP = "127.0.0.1"
+UDP_PORT = 5005
+
+sock = socket.socket(socket.AF_INET,  # Internet
+                     socket.SOCK_DGRAM)  # UDP
+sock.bind((UDP_IP, UDP_PORT))
+while True:
+    data, addr = sock.recvfrom(1024)  # buffer size is 1024 bytes
+    print("received message: %s" % data)
\ No newline at end of file
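
Note: camera.py streams whole pickled frames with a native-endian 4-byte length prefix (struct.pack('I', size)) on port 9501, but the commit contains no matching receiver; hands.py is empty, and server.py only understands the big-endian ">L" JPEG framing that client.py sends to port 8485. A minimal sketch of a receiver for camera.py's framing is below. The file name camera_viewer.py and everything in it are illustrative, not part of the commit, and the end-of-stream sentinel camera.py sends (a bare length of 10 with no payload) is not handled here.

# camera_viewer.py -- hypothetical receiver for camera.py's stream (not in this commit)
import cv2
import pickle
import socket
import struct

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 9501))       # camera.py listens on TCP_PORT 9501

header_size = struct.calcsize('I')      # matches struct.pack('I', size) in camera.py
data = b''

while True:
    # Read the 4-byte length prefix.
    while len(data) < header_size:
        chunk = sock.recv(4096)
        if not chunk:                   # sender closed the connection
            raise SystemExit
        data += chunk
    size = struct.unpack('I', data[:header_size])[0]
    data = data[header_size:]

    # Read the pickled frame and display it.
    while len(data) < size:
        data += sock.recv(4096)
    frame = pickle.loads(data[:size])
    data = data[size:]

    cv2.imshow('camera stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

sock.close()
cv2.destroyAllWindows()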