#!/usr/bin/env python
# coding: utf-8

# Jupyter-notebook export: compares MoveNet (Thunder) against PoseNet
# (MobileNet) pose estimation on still images and on video samples,
# in support of fall detection.

# In[1]:


get_ipython().system('pip install tensorflow matplotlib pandas')


# In[2]:


import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image, ImageDraw, ImageOps

# Show full DataFrames when printing comparison tables.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)


# In[3]:


from src.pipeline.pose_engine import PoseEngine
from src.pipeline.fall_detect import FallDetector


# In[4]:


def _fall_detect_config_movenet():
    """Return the FallDetector keyword config for the MoveNet Thunder model."""
    return {
        'model': {
            'tflite': 'ai_models/lite-model_movenet_singlepose_thunder_3.tflite',
        },
        'labels': 'ai_models/pose_labels.txt',
        'confidence_threshold': 0.6,
        'model_name': 'movenet',
    }


# In[5]:


def _fall_detect_config_mobilenet():
    """Return the FallDetector keyword config for the PoseNet MobileNet model."""
    return {
        'model': {
            'tflite': 'ai_models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite',
        },
        'labels': 'ai_models/pose_labels.txt',
        'confidence_threshold': 0.6,
        'model_name': 'mobilenet',
    }


# In[6]:


# Instantiate one detector + pose engine per model under comparison.
movenet_config = _fall_detect_config_movenet()
movenet_fall_detector = FallDetector(**movenet_config)
movenet_pose_engine = PoseEngine(
    movenet_fall_detector._tfengine, movenet_config['model_name'])

mobilenet_config = _fall_detect_config_mobilenet()
mobilenet_fall_detector = FallDetector(**mobilenet_config)
mobilenet_pose_engine = PoseEngine(
    mobilenet_fall_detector._tfengine, mobilenet_config['model_name'])


# # Posenet Vs Movenet

# In[7]:


DATADIR = 'Images'
comparison_df = pd.DataFrame()

for template_path in os.listdir(DATADIR):
    template_image_src = Image.open(os.path.join(DATADIR, template_path))

    (_thumbnail, movenet_output_image, movenet_scoreList,
     movenet_inference_time) = movenet_pose_engine.get_result(template_image_src)
    (_thumbnail, posenet_output_image, posenet_scoreList,
     posenet_inference_time) = mobilenet_pose_engine.get_result(template_image_src)

    movenet_scoreList['Inference_time'] = movenet_inference_time
    posenet_scoreList['Inference_time'] = posenet_inference_time

    scoreList_table = pd.DataFrame(
        data=[movenet_scoreList, posenet_scoreList],
        columns=['LShoulder_score', 'RShoulder_score', 'LHip_score',
                 'RHip_score', 'Inference_time'],
        index=['movenet', 'posenet'],
    )

    # Side-by-side view: input, MoveNet overlay, PoseNet overlay.
    fig = plt.figure(figsize=(15, 9))
    fig.add_subplot(1, 3, 1)
    plt.imshow(_thumbnail)
    plt.axis("off")
    plt.title("Input Image")
    fig.add_subplot(1, 3, 2)
    plt.imshow(movenet_output_image)
    plt.axis("off")
    plt.title("MoveNet")
    fig.add_subplot(1, 3, 3)
    plt.imshow(posenet_output_image)
    plt.axis("off")
    plt.title("PoseNet")
    plt.show()

    print()
    print(scoreList_table)
    print()

    # Re-index as (image, model) so per-image tables can be stacked into
    # one comparison DataFrame.
    idx = scoreList_table.index.to_frame()
    idx.insert(0, 'image', template_path)
    idx.insert(1, 'model', ['movenet', 'posenet'])
    scoreList_table = scoreList_table.set_index(
        pd.MultiIndex.from_frame(idx), drop=True).droplevel(2)

    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; pd.concat is the supported equivalent.
    comparison_df = pd.concat([comparison_df, scoreList_table])


# In[8]:


comparison_df


# In[ ]:


# # Fall-Detection on Video Samples

# In[9]:


def show_frames(frame_list, frame_score):
    """Plot up to the last 50 annotated frames in a 6-column grid.

    Each subplot is titled with the frame's four keypoint confidence
    scores (left/right shoulder, left/right hip).

    :param frame_list: sequence of annotated frame images (imshow-able).
    :param frame_score: per-frame dicts with LShoulder_score,
        RShoulder_score, LHip_score and RHip_score keys, parallel to
        ``frame_list``.
    """
    # FIX: slice scores in lockstep with frames — previously only the
    # frames were truncated to the last 50, so with longer videos each
    # frame was titled with another frame's scores.
    frame_list = frame_list[-50:]
    frame_score = frame_score[-50:]
    columns = 6
    # Ceiling division (no extra blank row when len is a multiple of 6).
    rows = (len(frame_list) + columns - 1) // columns
    fig = plt.figure(figsize=(20, 42))
    for i, frame in enumerate(frame_list):
        fig.add_subplot(rows, columns, i + 1)
        plt.axis("off")
        score = frame_score[i]
        plt.title(
            'LS:' + str(score['LShoulder_score']) + '\n'
            + 'RS:' + str(score['RShoulder_score']) + '\n'
            + 'LH:' + str(score['LHip_score']) + '\n'
            + 'RH:' + str(score['RHip_score']) + '\n'
        )
        plt.imshow(frame)


# In[10]:


def _pose_execution_for_video(video_path, pose_engine):
    """Run ``pose_engine`` on every frame of ``video_path`` and display
    the annotated frames with their keypoint scores via show_frames().

    Shared implementation for the MoveNet and PoseNet wrappers below
    (previously duplicated copy-paste).
    """
    cam = cv2.VideoCapture(video_path)
    output_frame_sequences = []
    output_frame_score = []
    try:
        while True:
            ret, frame = cam.read()
            if not ret:
                break
            # OpenCV decodes frames as BGR; PIL expects RGB.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            template_image_src = Image.fromarray(rgb)
            (_thumbnail, output_image, score_list,
             _inference_time) = pose_engine.get_result(template_image_src)
            output_frame_sequences.append(output_image)
            output_frame_score.append(score_list)
    finally:
        # Release the capture even if inference raises mid-video.
        cam.release()
    show_frames(output_frame_sequences, output_frame_score)


def movenet_execution_for_video(video_path):
    """Run the MoveNet pose engine over every frame of a video."""
    _pose_execution_for_video(video_path, movenet_pose_engine)


# In[11]:


def posenet_execution_for_video(video_path):
    """Run the PoseNet (MobileNet) pose engine over every frame of a video."""
    _pose_execution_for_video(video_path, mobilenet_pose_engine)


# # video - 1 .. 5

# In[12]:


# Same Posenet-then-Movenet comparison for each of the five sample videos
# (previously five copy-pasted notebook cells).
for _video_no in range(1, 6):
    _video_path = 'videos/video_%d.mp4' % _video_no
    print("Posenet \n")
    posenet_execution_for_video(_video_path)
    print("Movenet \n")
    movenet_execution_for_video(_video_path)


# In[ ]:


# In[ ]: