#!/usr/bin/env python
# coding: utf-8

# [![image](https://raw.githubusercontent.com/visual-layer/visuallayer/main/imgs/vl_horizontal_logo.png)](https://www.visual-layer.com)

# # Working with surveillance videos, captioning frames and detecting indoor/outdoor

# In[13]:

from IPython.display import Image

# ## Download the fight detection surveillance camera dataset

# In[ ]:

get_ipython().system('git clone https://github.com/seymanurakti/fight-detection-surv-dataset.git')

# ## Install fastdup

# In[1]:

get_ipython().run_line_magic('pip', 'install fastdup')
import fastdup

# ## Extract videos into frames

# In[4]:

get_ipython().system('mkdir -p frames')

# Extract frames from each video into class-specific subfolders
fastdup.extract_video_frames('fight-detection-surv-dataset/fight/', 'frames/fight')
fastdup.extract_video_frames('fight-detection-surv-dataset/noFight/', 'frames/no-fight')

# ## Quick preview of video frames

# In[8]:

fight_files = get_ipython().getoutput("find frames/fight/ -name '*.jpg'")
no_fight_files = get_ipython().getoutput("find frames/no-fight/ -name '*.jpg'")

# In[9]:

# Build a sprite image from 55 fight frames and display it inline
ret = fastdup.generate_sprite_image(fight_files, 55, ".")[0]
Image(filename=ret)

# In[12]:

# Same preview for the no-fight frames
ret = fastdup.generate_sprite_image(no_fight_files, 55, ".")[0]
Image(filename=ret)

# ## Build a fastdup model

# In[3]:

# Cluster similar frames together using DINOv2-small embeddings
fd = fastdup.create(input_dir='frames', work_dir='out_frames')
fd.run(overwrite=True, model_path='dinov2s')

# In[10]:

fd.vis.component_gallery(num_images=10)

# ## Look at video outliers and caption them automatically

# In[6]:

# Caption outlier frames with BLIP
fd.vis.outliers_gallery(num_images=10, label_col='blip')

# In[25]:

# In the report above, the generated captions are shown under the "label" field.

# ## Look at image outliers and classify them as indoors/outdoors

# In[10]:

fd.vis.outliers_gallery(num_images=10, label_col='indoors_outdoors')

# As can be seen, the indoors/outdoors classifier is about 90% correct on the example above.
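# ## Access the outlier report programmatically
# 
# The galleries above are rendered as HTML reports; the same outlier information can also be pulled
# into a pandas DataFrame for filtering or export. The cell below is a minimal sketch, assuming the
# `fd.outliers()` accessor returns a DataFrame as in recent fastdup releases; the column names used
# (e.g. 'distance') are assumptions and may differ between versions, so inspect the schema first.

# In[ ]:

# Hypothetical follow-up (not part of the original notebook): work with the outlier
# table directly instead of the HTML gallery.
df_outliers = fd.outliers()                  # DataFrame with one row per outlier frame
print(df_outliers.columns.tolist())          # check the actual schema for this fastdup version

# If a 'distance' column is present (assumed name), the lowest-similarity frames
# are the strongest outliers.
if 'distance' in df_outliers.columns:
    strongest = df_outliers.sort_values('distance').head(20)
    print(strongest)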
# ## Wrap Up
# 
# Next, feel free to check out the other tutorials:
# 
# + ⚡ [**Quickstart**](https://nbviewer.org/github/visual-layer/fastdup/blob/main/examples/quick-dataset-analysis.ipynb): Learn how to install fastdup, load a dataset and analyze it for potential issues such as duplicates/near-duplicates, broken images, outliers, dark/bright/blurry images, and view visually similar image clusters. If you're new, start here!
# + 🧹 [**Clean Image Folder**](https://nbviewer.org/github/visual-layer/fastdup/blob/main/examples/cleaning-image-dataset.ipynb): Learn how to analyze and clean a folder of images from potential issues and export a list of problematic files for further action. If you have an unorganized folder of images, this is a good place to start.
# + 🖼 [**Analyze Image Classification Dataset**](https://nbviewer.org/github/visual-layer/fastdup/blob/main/examples/analyzing-image-classification-dataset.ipynb): Learn how to load a labeled image classification dataset and analyze it for potential issues. If you have a labeled ImageNet-style folder structure, have a go!
# + 🎁 [**Analyze Object Detection Dataset**](https://nbviewer.org/github/visual-layer/fastdup/blob/main/examples/analyzing-object-detection-dataset.ipynb): Learn how to load bounding box annotations for object detection and analyze them for potential issues. If you have a COCO-style labeled object detection dataset, give this example a try.

# ## VL Profiler
# 
# If you prefer a no-code platform to inspect and visualize your dataset, [**try our free cloud product VL Profiler**](https://app.visual-layer.com) - VL Profiler is our first no-code commercial product that lets you visualize and inspect your dataset in your browser.
# 
# [Sign up](https://app.visual-layer.com) now, it's free.
# 
# [![image](https://raw.githubusercontent.com/visual-layer/fastdup/main/gallery/vl_profiler_promo.svg)](https://app.visual-layer.com)
# 
# As usual, feedback is welcome!
# 
# Questions? Drop by our [Slack channel](https://visualdatabase.slack.com/join/shared_invite/zt-19jaydbjn-lNDEDkgvSI1QwbTXSY6dlA#/shared-invite/email) or open an issue on [GitHub](https://github.com/visual-layer/fastdup/issues).