Reproduction of two classic models: AlexNet and YOLOv5

This commit is contained in:
sunny 2025-03-14 20:31:35 +08:00
parent 4bb66934b1
commit ad59c59c15
1559 changed files with 640 additions and 202 deletions

View File

@@ -1,252 +1,252 @@
# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
*.jpg
*.jpeg
*.png
*.bmp
*.tif
*.tiff
*.heic
*.JPG
*.JPEG
*.PNG
*.BMP
*.TIF
*.TIFF
*.HEIC
*.mp4
*.mov
*.MOV
*.avi
*.data
*.json
# # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
# *.jpg
# *.jpeg
# *.png
# *.bmp
# *.tif
# *.tiff
# *.heic
# *.JPG
# *.JPEG
# *.PNG
# *.BMP
# *.TIF
# *.TIFF
# *.HEIC
# *.mp4
# *.mov
# *.MOV
# *.avi
# *.data
# *.json
*.cfg
!cfg/yolov3*.cfg
# *.cfg
# !cfg/yolov3*.cfg
storage.googleapis.com
runs/*
data/*
!data/images/zidane.jpg
!data/images/bus.jpg
!data/coco.names
!data/coco_paper.names
!data/coco.data
!data/coco_*.data
!data/coco_*.txt
!data/trainvalno5k.shapes
!data/*.sh
# storage.googleapis.com
# runs/*
# data/*
# !data/images/zidane.jpg
# !data/images/bus.jpg
# !data/coco.names
# !data/coco_paper.names
# !data/coco.data
# !data/coco_*.data
# !data/coco_*.txt
# !data/trainvalno5k.shapes
# !data/*.sh
pycocotools/*
results*.txt
gcp_test*.sh
# pycocotools/*
# results*.txt
# gcp_test*.sh
# Datasets -------------------------------------------------------------------------------------------------------------
coco/
coco128/
VOC/
# # Datasets -------------------------------------------------------------------------------------------------------------
# coco/
# coco128/
# VOC/
# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
*.m~
*.mat
!targets*.mat
# # MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
# *.m~
# *.mat
# !targets*.mat
# Neural Network weights -----------------------------------------------------------------------------------------------
*.weights
*.pt
*.onnx
*.mlmodel
*.torchscript
darknet53.conv.74
yolov3-tiny.conv.15
# # Neural Network weights -----------------------------------------------------------------------------------------------
# *.weights
# *.pt
# *.onnx
# *.mlmodel
# *.torchscript
# darknet53.conv.74
# yolov3-tiny.conv.15
# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
# # Byte-compiled / optimized / DLL files
# __pycache__/
# *.py[cod]
# *$py.class
# C extensions
*.so
# # C extensions
# *.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
wandb/
.installed.cfg
*.egg
# # Distribution / packaging
# .Python
# env/
# build/
# develop-eggs/
# dist/
# downloads/
# eggs/
# .eggs/
# lib/
# lib64/
# parts/
# sdist/
# var/
# wheels/
# *.egg-info/
# wandb/
# .installed.cfg
# *.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# # PyInstaller
# # Usually these files are written by a python script from a template
# # before PyInstaller builds the exe, so as to inject date/other infos into it.
# *.manifest
# *.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# # Installer logs
# pip-log.txt
# pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# # Unit test / coverage reports
# htmlcov/
# .tox/
# .coverage
# .coverage.*
# .cache
# nosetests.xml
# coverage.xml
# *.cover
# .hypothesis/
# Translations
*.mo
*.pot
# # Translations
# *.mo
# *.pot
# Django stuff:
*.log
local_settings.py
# # Django stuff:
# *.log
# local_settings.py
# Flask stuff:
instance/
.webassets-cache
# # Flask stuff:
# instance/
# .webassets-cache
# Scrapy stuff:
.scrapy
# # Scrapy stuff:
# .scrapy
# Sphinx documentation
docs/_build/
# # Sphinx documentation
# docs/_build/
# PyBuilder
target/
# # PyBuilder
# target/
# Jupyter Notebook
.ipynb_checkpoints
# # Jupyter Notebook
# .ipynb_checkpoints
# pyenv
.python-version
# # pyenv
# .python-version
# celery beat schedule file
celerybeat-schedule
# # celery beat schedule file
# celerybeat-schedule
# SageMath parsed files
*.sage.py
# # SageMath parsed files
# *.sage.py
# dotenv
.env
# # dotenv
# .env
# virtualenv
.venv*
venv*/
ENV*/
# # virtualenv
# .venv*
# venv*/
# ENV*/
# Spyder project settings
.spyderproject
.spyproject
# # Spyder project settings
# .spyderproject
# .spyproject
# Rope project settings
.ropeproject
# # Rope project settings
# .ropeproject
# mkdocs documentation
/site
# # mkdocs documentation
# /site
# mypy
.mypy_cache/
# # mypy
# .mypy_cache/
# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
# # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
# General
.DS_Store
.AppleDouble
.LSOverride
# # General
# .DS_Store
# .AppleDouble
# .LSOverride
# Icon must end with two \r
Icon
Icon?
# # Icon must end with two \r
# Icon
# Icon?
# Thumbnails
._*
# # Thumbnails
# ._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# # Files that might appear in the root of a volume
# .DocumentRevisions-V100
# .fseventsd
# .Spotlight-V100
# .TemporaryItems
# .Trashes
# .VolumeIcon.icns
# .com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# # Directories potentially created on remote AFP share
# .AppleDB
# .AppleDesktop
# Network Trash Folder
# Temporary Items
# .apdisk
# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
# # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/*
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
.html # Bokeh Plots
.pg # TensorFlow Frozen Graphs
.avi # videos
# # User-specific stuff:
# .idea/*
# .idea/**/workspace.xml
# .idea/**/tasks.xml
# .idea/dictionaries
# .html # Bokeh Plots
# .pg # TensorFlow Frozen Graphs
# .avi # videos
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# # Sensitive or high-churn files:
# .idea/**/dataSources/
# .idea/**/dataSources.ids
# .idea/**/dataSources.local.xml
# .idea/**/sqlDataSources.xml
# .idea/**/dynamic.xml
# .idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# # Gradle:
# .idea/**/gradle.xml
# .idea/**/libraries
# CMake
cmake-build-debug/
cmake-build-release/
# # CMake
# cmake-build-debug/
# cmake-build-release/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
# # Mongo Explorer plugin:
# .idea/**/mongoSettings.xml
## File-based project format:
*.iws
# ## File-based project format:
# *.iws
## Plugin-specific files:
# ## Plugin-specific files:
# IntelliJ
out/
# # IntelliJ
# out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# # mpeltonen/sbt-idea plugin
# .idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# # JIRA plugin
# atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# # Cursive Clojure plugin
# .idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# # Crashlytics plugin (for Android Studio and IntelliJ)
# com_crashlytics_export_strings.xml
# crashlytics.properties
# crashlytics-build.properties
# fabric.properties

Binary file not shown.

View File

@@ -0,0 +1,21 @@
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /argoverse
# /yolov5
# download command/URL (optional)
download: bash data/scripts/get_argoverse_hd.sh
# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images
val: ../argoverse/Argoverse-1.1/images/val/  # 15062 images
test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview
# number of classes
nc: 8
# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ]

View File

@@ -0,0 +1,35 @@
# COCO 2017 dataset http://cocodataset.org
# Train command: python train.py --data coco.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /coco
# /yolov5
# download command/URL (optional)
download: bash data/scripts/get_coco.sh
# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco/train2017.txt # 118287 images
val: ../coco/val2017.txt # 5000 images
test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
# number of classes
nc: 80
# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush' ]
# Print classes
# with open('data/coco.yaml') as f:
# d = yaml.load(f, Loader=yaml.FullLoader) # dict
# for i, x in enumerate(d['names']):
# print(i, x)
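The commented-out snippet above needs only a PyYAML import to run; a minimal sketch (PyYAML is already listed in YOLOv5's requirements):
import yaml  # PyYAML
with open('data/coco.yaml') as f:
    d = yaml.load(f, Loader=yaml.FullLoader)  # dict with download/train/val/nc/names keys
for i, x in enumerate(d['names']):
    print(i, x)  # 0 person, 1 bicycle, 2 car, ...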

View File

@@ -0,0 +1,30 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /coco128
# /yolov5
#
## download command/URL (optional)
download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco128/images/train2017/ # 128 images
val: ../coco128/images/train2017/ # 128 images
# number of classes
nc: 80
# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush' ]

View File

@@ -0,0 +1,38 @@
# Hyperparameters for VOC finetuning
# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
# Hyperparameter Evolution Results
# Generations: 306
# P R mAP.5 mAP.5:.95 box obj cls
# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146
lr0: 0.0032
lrf: 0.12
momentum: 0.843
weight_decay: 0.00036
warmup_epochs: 2.0
warmup_momentum: 0.5
warmup_bias_lr: 0.05
box: 0.0296
cls: 0.243
cls_pw: 0.631
obj: 0.301
obj_pw: 0.911
iou_t: 0.2
anchor_t: 2.91
# anchors: 3.63
fl_gamma: 0.0
hsv_h: 0.0138
hsv_s: 0.664
hsv_v: 0.464
degrees: 0.373
translate: 0.245
scale: 0.898
shear: 0.602
perspective: 0.0
flipud: 0.00856
fliplr: 0.5
mosaic: 1.0
mixup: 0.243

View File

@@ -0,0 +1,33 @@
# Hyperparameters for COCO training from scratch
# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937 # SGD momentum/Adam beta1
weight_decay: 0.0005 # optimizer weight decay 5e-4
warmup_epochs: 3.0 # warmup epochs (fractions ok)
warmup_momentum: 0.8 # warmup initial momentum
warmup_bias_lr: 0.1 # warmup initial bias lr
box: 0.05 # box loss gain
cls: 0.5 # cls loss gain
cls_pw: 1.0 # cls BCELoss positive_weight
obj: 1.0 # obj loss gain (scale with pixels)
obj_pw: 1.0 # obj BCELoss positive_weight
iou_t: 0.20 # IoU training threshold
anchor_t: 4.0 # anchor-multiple threshold
# anchors: 3 # anchors per output layer (0 to ignore)
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4 # image HSV-Value augmentation (fraction)
degrees: 0.0 # image rotation (+/- deg)
translate: 0.1 # image translation (+/- fraction)
scale: 0.5 # image scale (+/- gain)
shear: 0.0 # image shear (+/- deg)
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.0 # image mixup (probability)

Binary image files not shown.

View File

@@ -0,0 +1,32 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /coco128
# /yolov5
#
## download command/URL (optional)
#download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
#train: ../coco128/images/train2017/ # 128 images
#val: ../coco128/images/train2017/ # 128 images
train: mydata\images\train
val: mydata\images\train
# number of classes
#nc: 80
nc: 1
## class names
#names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
# 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
# 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
# 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
# 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
# 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
# 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
# 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
# 'hair drier', 'toothbrush' ]
names: ['wind']
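This custom config defines a single 'wind' class. YOLOv5 resolves labels by swapping the images directory for labels, so each file under mydata\images\train needs a matching .txt under mydata\labels\train with one normalized "class x_center y_center width height" line per object. A minimal sketch to check that pairing (paths assumed from the yaml above):
from pathlib import Path
img_dir = Path('mydata/images/train')
lbl_dir = Path('mydata/labels/train')  # assumed parallel labels directory
for img in sorted(img_dir.glob('*.jpg')):
    lbl = lbl_dir / (img.stem + '.txt')
    if not lbl.exists():
        print('missing label for', img.name)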

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Download command: bash data/scripts/get_argoverse_hd.sh
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /argoverse
# /yolov5
# Download/unzip images
d='../argoverse/' # unzip directory
mkdir $d
url=https://argoverse-hd.s3.us-east-2.amazonaws.com/
f=Argoverse-HD-Full.zip
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
wait # finish background tasks
cd ../argoverse/Argoverse-1.1/
ln -s tracking images
cd ../Argoverse-HD/annotations/
python3 - "$@" <<END
import json
from pathlib import Path
annotation_files = ["train.json", "val.json"]
print("Converting annotations to YOLOv5 format...")
for val in annotation_files:
    a = json.load(open(val, "rb"))
    label_dict = {}
    for annot in a['annotations']:
        img_id = annot['image_id']
        img_name = a['images'][img_id]['name']
        img_label_name = img_name[:-3] + "txt"
        obj_class = annot['category_id']
        x_center, y_center, width, height = annot['bbox']
        x_center = (x_center + width / 2) / 1920.  # offset and scale
        y_center = (y_center + height / 2) / 1200.  # offset and scale
        width /= 1920.  # scale
        height /= 1200.  # scale
        img_dir = "./labels/" + a['seq_dirs'][a['images'][annot['image_id']]['sid']]
        Path(img_dir).mkdir(parents=True, exist_ok=True)
        if img_dir + "/" + img_label_name not in label_dict:
            label_dict[img_dir + "/" + img_label_name] = []
        label_dict[img_dir + "/" + img_label_name].append(f"{obj_class} {x_center} {y_center} {width} {height}\n")
    for filename in label_dict:
        with open(filename, "w") as file:
            for string in label_dict[filename]:
                file.write(string)
END
mv ./labels ../../Argoverse-1.1/
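The heredoc above converts Argoverse's COCO-style boxes (top-left x, y plus width, height in pixels) to YOLO's normalized center format. A quick check with made-up numbers, using the same 1920x1200 image size as the script:
x, y, w, h = 960.0, 300.0, 192.0, 120.0  # hypothetical box: top-left corner + size, pixels
x_center = (x + w / 2) / 1920.  # (960 + 96) / 1920 = 0.55
y_center = (y + h / 2) / 1200.  # (300 + 60) / 1200 = 0.30
w, h = w / 1920., h / 1200.     # 0.10, 0.10
print(x_center, y_center, w, h)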

View File

@@ -0,0 +1,27 @@
#!/bin/bash
# COCO 2017 dataset http://cocodataset.org
# Download command: bash data/scripts/get_coco.sh
# Train command: python train.py --data coco.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /coco
# /yolov5
# Download/unzip labels
d='../' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
# Download/unzip images
d='../coco/images' # unzip directory
url=http://images.cocodataset.org/zips/
f1='train2017.zip' # 19G, 118k images
f2='val2017.zip' # 1G, 5k images
f3='test2017.zip' # 7G, 41k images (optional)
for f in $f1 $f2; do
echo 'Downloading' $url$f '...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks

View File

@@ -0,0 +1,139 @@
#!/bin/bash
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash data/scripts/get_voc.sh
# Train command: python train.py --data voc.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /VOC
# /yolov5
start=$(date +%s)
mkdir -p ../tmp
cd ../tmp/
# Download/unzip images and labels
d='.' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images
f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images
f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images
for f in $f3 $f2 $f1; do
echo 'Downloading' $url$f '...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks
end=$(date +%s)
runtime=$((end - start))
echo "Completed in" $runtime "seconds"
echo "Splitting dataset..."
python3 - "$@" <<END
import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
sets=[('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
def convert(size, box):
    dw = 1./(size[0])
    dh = 1./(size[1])
    x = (box[0] + box[1])/2.0 - 1
    y = (box[2] + box[3])/2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return (x,y,w,h)

def convert_annotation(year, image_id):
    in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id))
    out_file = open('VOCdevkit/VOC%s/labels/%s.txt'%(year, image_id), 'w')
    tree=ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult)==1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w,h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

wd = getcwd()
for year, image_set in sets:
    if not os.path.exists('VOCdevkit/VOC%s/labels/'%(year)):
        os.makedirs('VOCdevkit/VOC%s/labels/'%(year))
    image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt'%(year, image_set)).read().strip().split()
    list_file = open('%s_%s.txt'%(year, image_set), 'w')
    for image_id in image_ids:
        list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n'%(wd, year, image_id))
        convert_annotation(year, image_id)
    list_file.close()
END
cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt >train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt
python3 - "$@" <<END
import shutil
import os
os.system('mkdir ../VOC/')
os.system('mkdir ../VOC/images')
os.system('mkdir ../VOC/images/train')
os.system('mkdir ../VOC/images/val')
os.system('mkdir ../VOC/labels')
os.system('mkdir ../VOC/labels/train')
os.system('mkdir ../VOC/labels/val')
import os
print(os.path.exists('../tmp/train.txt'))
f = open('../tmp/train.txt', 'r')
lines = f.readlines()
for line in lines:
    line = "/".join(line.split('/')[-5:]).strip()
    if (os.path.exists("../" + line)):
        os.system("cp ../"+ line + " ../VOC/images/train")
    line = line.replace('JPEGImages', 'labels')
    line = line.replace('jpg', 'txt')
    if (os.path.exists("../" + line)):
        os.system("cp ../"+ line + " ../VOC/labels/train")

print(os.path.exists('../tmp/2007_test.txt'))
f = open('../tmp/2007_test.txt', 'r')
lines = f.readlines()
for line in lines:
    line = "/".join(line.split('/')[-5:]).strip()
    if (os.path.exists("../" + line)):
        os.system("cp ../"+ line + " ../VOC/images/val")
    line = line.replace('JPEGImages', 'labels')
    line = line.replace('jpg', 'txt')
    if (os.path.exists("../" + line)):
        os.system("cp ../"+ line + " ../VOC/labels/val")
END
rm -rf ../tmp # remove temporary directory
echo "VOC download done."

View File

@@ -0,0 +1,21 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Train command: python train.py --data voc.yaml
# Default dataset location is next to /yolov5:
# /parent_folder
# /VOC
# /yolov5
# download command/URL (optional)
download: bash data/scripts/get_voc.sh
# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../VOC/images/train/ # 16551 images
val: ../VOC/images/val/ # 4952 images
# number of classes
nc: 20
# class names
names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ]

Binary files not shown.

Some files were not shown because too many files have changed in this diff.