From 754bbf7a25a8dda49b5d08ef0d0443bbf5af0e36 Mon Sep 17 00:00:00 2001 From: Craig Jennings Date: Sun, 7 Apr 2024 13:41:34 -0500 Subject: new repository --- ...ompose%2Fcli-command-compatibility%2Findex.html | 37 + .../compose%2Fcompose-file%2Fbuild%2Findex.html | 97 ++ ...e%2Fcompose-file%2Fcompose-file-v2%2Findex.html | 679 ++++++++++++++ ...e%2Fcompose-file%2Fcompose-file-v3%2Findex.html | 996 +++++++++++++++++++++ ...Fcompose-file%2Fcompose-versioning%2Findex.html | 151 ++++ .../compose%2Fcompose-file%2Fdeploy%2Findex.html | 126 +++ devdocs/docker/compose%2Fcompose-file%2Findex.html | 812 +++++++++++++++++ devdocs/docker/compose%2Fenv-file%2Findex.html | 11 + .../compose%2Fenvironment-variables%2Findex.html | 95 ++ devdocs/docker/compose%2Fextends%2Findex.html | 192 ++++ devdocs/docker/compose%2Ffaq%2Findex.html | 15 + .../docker/compose%2Fgettingstarted%2Findex.html | 123 +++ devdocs/docker/compose%2Fgpu-support%2Findex.html | 119 +++ devdocs/docker/compose%2Findex.html | 28 + devdocs/docker/compose%2Finstall%2Findex.html | 36 + devdocs/docker/compose%2Fnetworking%2Findex.html | 82 ++ devdocs/docker/compose%2Fproduction%2Findex.html | 13 + devdocs/docker/compose%2Fprofiles%2Findex.html | 88 ++ .../compose%2Freference%2Fconfig%2Findex.html | 21 + .../compose%2Freference%2Fenvvars%2Findex.html | 10 + devdocs/docker/compose%2Freference%2Findex.html | 96 ++ .../docker/compose%2Freference%2Fpull%2Findex.html | 48 + .../docker/compose%2Freference%2Fstop%2Findex.html | 14 + .../docker/compose%2Freference%2Fup%2Findex.html | 43 + .../compose%2Fsamples-for-compose%2Findex.html | 11 + .../docker/compose%2Fstartup-order%2Findex.html | 37 + devdocs/docker/engine%2Fapi%2Findex.html | 31 + .../engine%2Fapi%2Fsdk%2Fexamples%2Findex.html | 435 +++++++++ devdocs/docker/engine%2Fapi%2Fsdk%2Findex.html | 81 ++ ...%2Fcontext%2Fworking-with-contexts%2Findex.html | 101 +++ devdocs/docker/engine%2Fdeprecated%2Findex.html | 82 ++ .../docker/engine%2Fextend%2Fconfig%2Findex.html | 55 ++ devdocs/docker/engine%2Fextend%2Findex.html | 89 ++ .../engine%2Fextend%2Flegacy_plugins%2Findex.html | 9 + .../engine%2Fextend%2Fplugin_api%2Findex.html | 43 + ...e%2Fextend%2Fplugins_authorization%2Findex.html | 48 + .../engine%2Fextend%2Fplugins_network%2Findex.html | 10 + .../engine%2Fextend%2Fplugins_volume%2Findex.html | 98 ++ devdocs/docker/engine%2Findex.html | 9 + .../engine%2Finstall%2Fbinaries%2Findex.html | 26 + .../docker/engine%2Finstall%2Fcentos%2Findex.html | 48 + .../docker/engine%2Finstall%2Fdebian%2Findex.html | 47 + .../docker/engine%2Finstall%2Ffedora%2Findex.html | 50 ++ devdocs/docker/engine%2Finstall%2Findex.html | 10 + ...gine%2Finstall%2Flinux-postinstall%2Findex.html | 76 ++ .../docker/engine%2Finstall%2Frhel%2Findex.html | 49 + .../docker/engine%2Finstall%2Fsles%2Findex.html | 46 + .../docker/engine%2Finstall%2Fubuntu%2Findex.html | 47 + .../engine%2Freference%2Fbuilder%2Findex.html | 527 +++++++++++ ...2Freference%2Fcommandline%2Fattach%2Findex.html | 65 ++ ...%2Freference%2Fcommandline%2Fbuild%2Findex.html | 180 ++++ ...Freference%2Fcommandline%2Fbuilder%2Findex.html | 7 + ...ence%2Fcommandline%2Fbuilder_build%2Findex.html | 22 + ...ence%2Fcommandline%2Fbuilder_prune%2Findex.html | 11 + ...ference%2Fcommandline%2Fcheckpoint%2Findex.html | 30 + ...%2Fcommandline%2Fcheckpoint_create%2Findex.html | 7 + ...ence%2Fcommandline%2Fcheckpoint_ls%2Findex.html | 7 + ...ence%2Fcommandline%2Fcheckpoint_rm%2Findex.html | 7 + ...ne%2Freference%2Fcommandline%2Fcli%2Findex.html | 104 +++ 
...2Freference%2Fcommandline%2Fcommit%2Findex.html | 66 ++ ...2Freference%2Fcommandline%2Fconfig%2Findex.html | 7 + ...ence%2Fcommandline%2Fconfig_create%2Findex.html | 51 ++ ...nce%2Fcommandline%2Fconfig_inspect%2Findex.html | 35 + ...eference%2Fcommandline%2Fconfig_ls%2Findex.html | 48 + ...eference%2Fcommandline%2Fconfig_rm%2Findex.html | 9 + ...eference%2Fcommandline%2Fcontainer%2Findex.html | 7 + ...e%2Fcommandline%2Fcontainer_attach%2Findex.html | 7 + ...e%2Fcommandline%2Fcontainer_commit%2Findex.html | 15 + ...rence%2Fcommandline%2Fcontainer_cp%2Findex.html | 12 + ...e%2Fcommandline%2Fcontainer_create%2Findex.html | 35 + ...nce%2Fcommandline%2Fcontainer_diff%2Findex.html | 7 + ...nce%2Fcommandline%2Fcontainer_exec%2Findex.html | 19 + ...e%2Fcommandline%2Fcontainer_export%2Findex.html | 9 + ...%2Fcommandline%2Fcontainer_inspect%2Findex.html | 11 + ...nce%2Fcommandline%2Fcontainer_kill%2Findex.html | 9 + ...nce%2Fcommandline%2Fcontainer_logs%2Findex.html | 13 + ...rence%2Fcommandline%2Fcontainer_ls%2Findex.html | 19 + ...ce%2Fcommandline%2Fcontainer_pause%2Findex.html | 7 + ...nce%2Fcommandline%2Fcontainer_port%2Findex.html | 7 + ...ce%2Fcommandline%2Fcontainer_prune%2Findex.html | 51 ++ ...e%2Fcommandline%2Fcontainer_rename%2Findex.html | 7 + ...%2Fcommandline%2Fcontainer_restart%2Findex.html | 9 + ...rence%2Fcommandline%2Fcontainer_rm%2Findex.html | 13 + ...ence%2Fcommandline%2Fcontainer_run%2Findex.html | 37 + ...ce%2Fcommandline%2Fcontainer_start%2Findex.html | 13 + ...ce%2Fcommandline%2Fcontainer_stats%2Findex.html | 9 + ...nce%2Fcommandline%2Fcontainer_stop%2Findex.html | 9 + ...ence%2Fcommandline%2Fcontainer_top%2Findex.html | 7 + ...%2Fcommandline%2Fcontainer_unpause%2Findex.html | 7 + ...e%2Fcommandline%2Fcontainer_update%2Findex.html | 12 + ...nce%2Fcommandline%2Fcontainer_wait%2Findex.html | 7 + ...Freference%2Fcommandline%2Fcontext%2Findex.html | 7 + ...nce%2Fcommandline%2Fcontext_create%2Findex.html | 25 + ...nce%2Fcommandline%2Fcontext_export%2Findex.html | 8 + ...nce%2Fcommandline%2Fcontext_import%2Findex.html | 7 + ...ce%2Fcommandline%2Fcontext_inspect%2Findex.html | 42 + ...ference%2Fcommandline%2Fcontext_ls%2Findex.html | 15 + ...ference%2Fcommandline%2Fcontext_rm%2Findex.html | 9 + ...nce%2Fcommandline%2Fcontext_update%2Findex.html | 13 + ...erence%2Fcommandline%2Fcontext_use%2Findex.html | 7 + ...ine%2Freference%2Fcommandline%2Fcp%2Findex.html | 32 + ...2Freference%2Fcommandline%2Fcreate%2Findex.html | 69 ++ ...e%2Freference%2Fcommandline%2Fdiff%2Findex.html | 28 + ...2Freference%2Fcommandline%2Fdocker%2Findex.html | 6 + ...Freference%2Fcommandline%2Fdockerd%2Findex.html | 428 +++++++++ ...2Freference%2Fcommandline%2Fevents%2Findex.html | 171 ++++ ...e%2Freference%2Fcommandline%2Fexec%2Findex.html | 42 + ...2Freference%2Fcommandline%2Fexport%2Findex.html | 11 + ...Freference%2Fcommandline%2Fhistory%2Findex.html | 30 + ...%2Freference%2Fcommandline%2Fimage%2Findex.html | 7 + ...erence%2Fcommandline%2Fimage_build%2Findex.html | 22 + ...ence%2Fcommandline%2Fimage_history%2Findex.html | 11 + ...rence%2Fcommandline%2Fimage_import%2Findex.html | 11 + ...ence%2Fcommandline%2Fimage_inspect%2Findex.html | 9 + ...ference%2Fcommandline%2Fimage_load%2Findex.html | 11 + ...reference%2Fcommandline%2Fimage_ls%2Findex.html | 13 + ...erence%2Fcommandline%2Fimage_prune%2Findex.html | 102 +++ ...ference%2Fcommandline%2Fimage_pull%2Findex.html | 11 + ...ference%2Fcommandline%2Fimage_push%2Findex.html | 11 + ...reference%2Fcommandline%2Fimage_rm%2Findex.html | 9 + 
...ference%2Fcommandline%2Fimage_save%2Findex.html | 9 + ...eference%2Fcommandline%2Fimage_tag%2Findex.html | 7 + ...2Freference%2Fcommandline%2Fimages%2Findex.html | 136 +++ ...2Freference%2Fcommandline%2Fimport%2Findex.html | 17 + ...e%2Freference%2Fcommandline%2Finfo%2Findex.html | 186 ++++ ...Freference%2Fcommandline%2Finspect%2Findex.html | 18 + ...e%2Freference%2Fcommandline%2Fkill%2Findex.html | 14 + ...e%2Freference%2Fcommandline%2Fload%2Findex.html | 36 + ...%2Freference%2Fcommandline%2Flogin%2Findex.html | 33 + ...2Freference%2Fcommandline%2Flogout%2Findex.html | 8 + ...e%2Freference%2Fcommandline%2Flogs%2Findex.html | 20 + ...reference%2Fcommandline%2Fmanifest%2Findex.html | 154 ++++ ...%2Fcommandline%2Fmanifest_annotate%2Findex.html | 7 + ...ce%2Fcommandline%2Fmanifest_create%2Findex.html | 9 + ...e%2Fcommandline%2Fmanifest_inspect%2Findex.html | 9 + ...ence%2Fcommandline%2Fmanifest_push%2Findex.html | 9 + ...erence%2Fcommandline%2Fmanifest_rm%2Findex.html | 7 + ...Freference%2Fcommandline%2Fnetwork%2Findex.html | 7 + ...ce%2Fcommandline%2Fnetwork_connect%2Findex.html | 14 + ...nce%2Fcommandline%2Fnetwork_create%2Findex.html | 38 + ...2Fcommandline%2Fnetwork_disconnect%2Findex.html | 10 + ...ce%2Fcommandline%2Fnetwork_inspect%2Findex.html | 11 + ...ference%2Fcommandline%2Fnetwork_ls%2Findex.html | 72 ++ ...ence%2Fcommandline%2Fnetwork_prune%2Findex.html | 37 + ...ference%2Fcommandline%2Fnetwork_rm%2Findex.html | 9 + ...e%2Freference%2Fcommandline%2Fnode%2Findex.html | 7 + ...erence%2Fcommandline%2Fnode_demote%2Findex.html | 8 + ...rence%2Fcommandline%2Fnode_inspect%2Findex.html | 119 +++ ...Freference%2Fcommandline%2Fnode_ls%2Findex.html | 58 ++ ...rence%2Fcommandline%2Fnode_promote%2Findex.html | 8 + ...Freference%2Fcommandline%2Fnode_ps%2Findex.html | 41 + ...Freference%2Fcommandline%2Fnode_rm%2Findex.html | 19 + ...erence%2Fcommandline%2Fnode_update%2Findex.html | 10 + ...%2Freference%2Fcommandline%2Fpause%2Findex.html | 8 + ...2Freference%2Fcommandline%2Fplugin%2Findex.html | 7 + ...ence%2Fcommandline%2Fplugin_create%2Findex.html | 21 + ...nce%2Fcommandline%2Fplugin_disable%2Findex.html | 21 + ...ence%2Fcommandline%2Fplugin_enable%2Findex.html | 19 + ...nce%2Fcommandline%2Fplugin_inspect%2Findex.html | 118 +++ ...nce%2Fcommandline%2Fplugin_install%2Findex.html | 19 + ...eference%2Fcommandline%2Fplugin_ls%2Findex.html | 25 + ...erence%2Fcommandline%2Fplugin_push%2Findex.html | 13 + ...eference%2Fcommandline%2Fplugin_rm%2Findex.html | 16 + ...ference%2Fcommandline%2Fplugin_set%2Findex.html | 39 + ...nce%2Fcommandline%2Fplugin_upgrade%2Findex.html | 52 ++ ...e%2Freference%2Fcommandline%2Fport%2Findex.html | 28 + ...ine%2Freference%2Fcommandline%2Fps%2Findex.html | 172 ++++ ...e%2Freference%2Fcommandline%2Fpull%2Findex.html | 72 ++ ...e%2Freference%2Fcommandline%2Fpush%2Findex.html | 39 + ...2Freference%2Fcommandline%2Frename%2Findex.html | 8 + ...Freference%2Fcommandline%2Frestart%2Findex.html | 10 + ...ine%2Freference%2Fcommandline%2Frm%2Findex.html | 30 + ...ne%2Freference%2Fcommandline%2Frmi%2Findex.html | 61 ++ ...ne%2Freference%2Fcommandline%2Frun%2Findex.html | 196 ++++ ...e%2Freference%2Fcommandline%2Fsave%2Findex.html | 26 + ...2Freference%2Fcommandline%2Fsearch%2Findex.html | 79 ++ ...2Freference%2Fcommandline%2Fsecret%2Findex.html | 7 + ...ence%2Fcommandline%2Fsecret_create%2Findex.html | 52 ++ ...nce%2Fcommandline%2Fsecret_inspect%2Findex.html | 34 + ...eference%2Fcommandline%2Fsecret_ls%2Findex.html | 48 + ...eference%2Fcommandline%2Fsecret_rm%2Findex.html | 9 + 
...Freference%2Fcommandline%2Fservice%2Findex.html | 7 + ...nce%2Fcommandline%2Fservice_create%2Findex.html | 229 +++++ ...ce%2Fcommandline%2Fservice_inspect%2Findex.html | 89 ++ ...rence%2Fcommandline%2Fservice_logs%2Findex.html | 13 + ...ference%2Fcommandline%2Fservice_ls%2Findex.html | 40 + ...ference%2Fcommandline%2Fservice_ps%2Findex.html | 64 ++ ...ference%2Fcommandline%2Fservice_rm%2Findex.html | 14 + ...e%2Fcommandline%2Fservice_rollback%2Findex.html | 28 + ...ence%2Fcommandline%2Fservice_scale%2Findex.html | 35 + ...nce%2Fcommandline%2Fservice_update%2Findex.html | 83 ++ ...%2Freference%2Fcommandline%2Fstack%2Findex.html | 9 + ...rence%2Fcommandline%2Fstack_deploy%2Findex.html | 60 ++ ...reference%2Fcommandline%2Fstack_ls%2Findex.html | 19 + ...reference%2Fcommandline%2Fstack_ps%2Findex.html | 100 +++ ...reference%2Fcommandline%2Fstack_rm%2Findex.html | 32 + ...nce%2Fcommandline%2Fstack_services%2Findex.html | 28 + ...%2Freference%2Fcommandline%2Fstart%2Findex.html | 14 + ...%2Freference%2Fcommandline%2Fstats%2Findex.html | 62 ++ ...e%2Freference%2Fcommandline%2Fstop%2Findex.html | 10 + ...%2Freference%2Fcommandline%2Fswarm%2Findex.html | 7 + ...reference%2Fcommandline%2Fswarm_ca%2Findex.html | 41 + ...ference%2Fcommandline%2Fswarm_init%2Findex.html | 29 + ...ference%2Fcommandline%2Fswarm_join%2Findex.html | 22 + ...e%2Fcommandline%2Fswarm_join-token%2Findex.html | 36 + ...erence%2Fcommandline%2Fswarm_leave%2Findex.html | 18 + ...rence%2Fcommandline%2Fswarm_unlock%2Findex.html | 9 + ...e%2Fcommandline%2Fswarm_unlock-key%2Findex.html | 32 + ...rence%2Fcommandline%2Fswarm_update%2Findex.html | 8 + ...2Freference%2Fcommandline%2Fsystem%2Findex.html | 7 + ...eference%2Fcommandline%2Fsystem_df%2Findex.html | 41 + ...ence%2Fcommandline%2Fsystem_events%2Findex.html | 146 +++ ...erence%2Fcommandline%2Fsystem_info%2Findex.html | 9 + ...rence%2Fcommandline%2Fsystem_prune%2Findex.html | 72 ++ ...ne%2Freference%2Fcommandline%2Ftag%2Findex.html | 11 + ...ne%2Freference%2Fcommandline%2Ftop%2Findex.html | 7 + ...%2Freference%2Fcommandline%2Ftrust%2Findex.html | 7 + ...ence%2Fcommandline%2Ftrust_inspect%2Findex.html | 359 ++++++++ ...eference%2Fcommandline%2Ftrust_key%2Findex.html | 7 + ...2Fcommandline%2Ftrust_key_generate%2Findex.html | 23 + ...nce%2Fcommandline%2Ftrust_key_load%2Findex.html | 19 + ...rence%2Fcommandline%2Ftrust_revoke%2Findex.html | 71 ++ ...ference%2Fcommandline%2Ftrust_sign%2Findex.html | 80 ++ ...rence%2Fcommandline%2Ftrust_signer%2Findex.html | 7 + ...e%2Fcommandline%2Ftrust_signer_add%2Findex.html | 38 + ...Fcommandline%2Ftrust_signer_remove%2Findex.html | 114 +++ ...Freference%2Fcommandline%2Funpause%2Findex.html | 9 + ...2Freference%2Fcommandline%2Fupdate%2Findex.html | 18 + ...Freference%2Fcommandline%2Fversion%2Findex.html | 64 ++ ...2Freference%2Fcommandline%2Fvolume%2Findex.html | 7 + ...ence%2Fcommandline%2Fvolume_create%2Findex.html | 35 + ...nce%2Fcommandline%2Fvolume_inspect%2Findex.html | 27 + ...eference%2Fcommandline%2Fvolume_ls%2Findex.html | 63 ++ ...rence%2Fcommandline%2Fvolume_prune%2Findex.html | 18 + ...eference%2Fcommandline%2Fvolume_rm%2Findex.html | 12 + ...e%2Freference%2Fcommandline%2Fwait%2Findex.html | 13 + .../docker/engine%2Freference%2Frun%2Findex.html | 333 +++++++ devdocs/docker/engine%2Frelease-notes%2Findex.html | 217 +++++ devdocs/docker/engine%2Fscan%2Findex.html | 275 ++++++ .../engine%2Fsecurity%2Fapparmor%2Findex.html | 134 +++ .../engine%2Fsecurity%2Fcertificates%2Findex.html | 24 + devdocs/docker/engine%2Fsecurity%2Findex.html | 11 
+ ...engine%2Fsecurity%2Fprotect-access%2Findex.html | 117 +++ .../engine%2Fsecurity%2Frootless%2Findex.html | 123 +++ .../engine%2Fsecurity%2Fseccomp%2Findex.html | 18 + ...ecurity%2Ftrust%2Fdeploying_notary%2Findex.html | 11 + .../docker/engine%2Fsecurity%2Ftrust%2Findex.html | 69 ++ ...ecurity%2Ftrust%2Ftrust_automation%2Findex.html | 48 + ...ecurity%2Ftrust%2Ftrust_delegation%2Findex.html | 197 ++++ ...2Fsecurity%2Ftrust%2Ftrust_key_mng%2Findex.html | 12 + ...2Fsecurity%2Ftrust%2Ftrust_sandbox%2Findex.html | 123 +++ .../engine%2Fsecurity%2Fuserns-remap%2Findex.html | 46 + .../engine%2Fswarm%2Fadmin_guide%2Findex.html | 33 + .../docker/engine%2Fswarm%2Fconfigs%2Findex.html | 236 +++++ ...arm%2Fhow-swarm-mode-works%2Fnodes%2Findex.html | 11 + ...swarm%2Fhow-swarm-mode-works%2Fpki%2Findex.html | 22 + ...%2Fhow-swarm-mode-works%2Fservices%2Findex.html | 10 + devdocs/docker/engine%2Fswarm%2Findex.html | 10 + .../docker/engine%2Fswarm%2Fingress%2Findex.html | 66 ++ .../engine%2Fswarm%2Fjoin-nodes%2Findex.html | 36 + .../engine%2Fswarm%2Fkey-concepts%2Findex.html | 10 + .../engine%2Fswarm%2Fmanage-nodes%2Findex.html | 64 ++ devdocs/docker/engine%2Fswarm%2Fraft%2Findex.html | 13 + .../docker/engine%2Fswarm%2Fsecrets%2Findex.html | 333 +++++++ .../docker/engine%2Fswarm%2Fservices%2Findex.html | 154 ++++ .../engine%2Fswarm%2Fstack-deploy%2Findex.html | 127 +++ .../engine%2Fswarm%2Fswarm-mode%2Findex.html | 55 ++ ...swarm%2Fswarm-tutorial%2Fadd-nodes%2Findex.html | 32 + ...rm%2Fswarm-tutorial%2Fcreate-swarm%2Findex.html | 40 + ...%2Fswarm-tutorial%2Fdelete-service%2Findex.html | 27 + ...%2Fswarm-tutorial%2Fdeploy-service%2Findex.html | 16 + ...warm%2Fswarm-tutorial%2Fdrain-node%2Findex.html | 54 ++ .../engine%2Fswarm%2Fswarm-tutorial%2Findex.html | 13 + ...2Fswarm-tutorial%2Finspect-service%2Findex.html | 78 ++ ...%2Fswarm-tutorial%2Frolling-update%2Findex.html | 69 ++ ...m%2Fswarm-tutorial%2Fscale-service%2Findex.html | 25 + ...ne%2Fswarm%2Fswarm_manager_locking%2Findex.html | 65 ++ .../docker/get-started%2F02_our_app%2Findex.html | 20 + .../get-started%2F04_sharing_app%2Findex.html | 16 + devdocs/docker/get-started%2Findex.html | 15 + .../docker/get-started%2Fkube-deploy%2Findex.html | 56 ++ .../get-started%2Forchestration%2Findex.html | 88 ++ devdocs/docker/get-started%2Foverview%2Findex.html | 11 + .../docker/get-started%2Fresources%2Findex.html | 24 + .../docker/get-started%2Fswarm-deploy%2Findex.html | 24 + devdocs/docker/index | 1 + devdocs/docker/index.html | 6 + devdocs/docker/machine%2Findex.html | 9 + devdocs/docker/metadata | 4 + 289 files changed, 16607 insertions(+) create mode 100644 devdocs/docker/compose%2Fcli-command-compatibility%2Findex.html create mode 100644 devdocs/docker/compose%2Fcompose-file%2Fbuild%2Findex.html create mode 100644 devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v2%2Findex.html create mode 100644 devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v3%2Findex.html create mode 100644 devdocs/docker/compose%2Fcompose-file%2Fcompose-versioning%2Findex.html create mode 100644 devdocs/docker/compose%2Fcompose-file%2Fdeploy%2Findex.html create mode 100644 devdocs/docker/compose%2Fcompose-file%2Findex.html create mode 100644 devdocs/docker/compose%2Fenv-file%2Findex.html create mode 100644 devdocs/docker/compose%2Fenvironment-variables%2Findex.html create mode 100644 devdocs/docker/compose%2Fextends%2Findex.html create mode 100644 devdocs/docker/compose%2Ffaq%2Findex.html create mode 100644 devdocs/docker/compose%2Fgettingstarted%2Findex.html create mode 
100644 devdocs/docker/compose%2Fgpu-support%2Findex.html create mode 100644 devdocs/docker/compose%2Findex.html create mode 100644 devdocs/docker/compose%2Finstall%2Findex.html create mode 100644 devdocs/docker/compose%2Fnetworking%2Findex.html create mode 100644 devdocs/docker/compose%2Fproduction%2Findex.html create mode 100644 devdocs/docker/compose%2Fprofiles%2Findex.html create mode 100644 devdocs/docker/compose%2Freference%2Fconfig%2Findex.html create mode 100644 devdocs/docker/compose%2Freference%2Fenvvars%2Findex.html create mode 100644 devdocs/docker/compose%2Freference%2Findex.html create mode 100644 devdocs/docker/compose%2Freference%2Fpull%2Findex.html create mode 100644 devdocs/docker/compose%2Freference%2Fstop%2Findex.html create mode 100644 devdocs/docker/compose%2Freference%2Fup%2Findex.html create mode 100644 devdocs/docker/compose%2Fsamples-for-compose%2Findex.html create mode 100644 devdocs/docker/compose%2Fstartup-order%2Findex.html create mode 100644 devdocs/docker/engine%2Fapi%2Findex.html create mode 100644 devdocs/docker/engine%2Fapi%2Fsdk%2Fexamples%2Findex.html create mode 100644 devdocs/docker/engine%2Fapi%2Fsdk%2Findex.html create mode 100644 devdocs/docker/engine%2Fcontext%2Fworking-with-contexts%2Findex.html create mode 100644 devdocs/docker/engine%2Fdeprecated%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Fconfig%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Flegacy_plugins%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Fplugin_api%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Fplugins_authorization%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Fplugins_network%2Findex.html create mode 100644 devdocs/docker/engine%2Fextend%2Fplugins_volume%2Findex.html create mode 100644 devdocs/docker/engine%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Fbinaries%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Fcentos%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Fdebian%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Ffedora%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Flinux-postinstall%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Frhel%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Fsles%2Findex.html create mode 100644 devdocs/docker/engine%2Finstall%2Fubuntu%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fbuilder%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fattach%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fbuild%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_build%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_prune%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_rm%2Findex.html create mode 100644 
devdocs/docker/engine%2Freference%2Fcommandline%2Fcli%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcommit%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_attach%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_commit%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_cp%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_diff%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_exec%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_export%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_kill%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_logs%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_pause%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_port%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_prune%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rename%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_restart%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_run%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_start%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stats%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stop%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_top%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_unpause%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_update%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_wait%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_export%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_import%2Findex.html create mode 
100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_update%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_use%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcp%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fcreate%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fdiff%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fdocker%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fdockerd%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fevents%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fexec%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fexport%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fhistory%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_build%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_history%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_import%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_load%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_prune%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_pull%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_push%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_save%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_tag%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimages%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fimport%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Finfo%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Finspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fkill%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fload%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Flogin%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Flogout%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Flogs%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_annotate%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_create%2Findex.html 
create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_push%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_connect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_disconnect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_prune%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_demote%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_promote%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ps%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_update%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fpause%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_disable%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_enable%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_install%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_push%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_set%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_upgrade%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fport%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fps%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fpull%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fpush%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Frename%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Frestart%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Frm%2Findex.html create mode 100644 
devdocs/docker/engine%2Freference%2Fcommandline%2Frmi%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Frun%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsave%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsearch%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_logs%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ps%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rollback%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_scale%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_update%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstack%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_deploy%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ps%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_services%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstart%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstats%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fstop%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_ca%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_init%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join-token%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_leave%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock-key%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_update%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem%2Findex.html create mode 100644 
devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_df%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_events%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_info%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_prune%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftag%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftop%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_generate%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_load%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_revoke%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_sign%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_add%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_remove%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Funpause%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fupdate%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fversion%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_create%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_inspect%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_ls%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_prune%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_rm%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Fcommandline%2Fwait%2Findex.html create mode 100644 devdocs/docker/engine%2Freference%2Frun%2Findex.html create mode 100644 devdocs/docker/engine%2Frelease-notes%2Findex.html create mode 100644 devdocs/docker/engine%2Fscan%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Fapparmor%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Fcertificates%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Fprotect-access%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Frootless%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Fseccomp%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Ftrust%2Fdeploying_notary%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Ftrust%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_automation%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_delegation%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_key_mng%2Findex.html create mode 100644 
devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_sandbox%2Findex.html create mode 100644 devdocs/docker/engine%2Fsecurity%2Fuserns-remap%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fadmin_guide%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fconfigs%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fnodes%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fpki%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fservices%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fingress%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fjoin-nodes%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fkey-concepts%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fmanage-nodes%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fraft%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fsecrets%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fservices%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fstack-deploy%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-mode%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fadd-nodes%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fcreate-swarm%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdelete-service%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdeploy-service%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdrain-node%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Finspect-service%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Frolling-update%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fscale-service%2Findex.html create mode 100644 devdocs/docker/engine%2Fswarm%2Fswarm_manager_locking%2Findex.html create mode 100644 devdocs/docker/get-started%2F02_our_app%2Findex.html create mode 100644 devdocs/docker/get-started%2F04_sharing_app%2Findex.html create mode 100644 devdocs/docker/get-started%2Findex.html create mode 100644 devdocs/docker/get-started%2Fkube-deploy%2Findex.html create mode 100644 devdocs/docker/get-started%2Forchestration%2Findex.html create mode 100644 devdocs/docker/get-started%2Foverview%2Findex.html create mode 100644 devdocs/docker/get-started%2Fresources%2Findex.html create mode 100644 devdocs/docker/get-started%2Fswarm-deploy%2Findex.html create mode 100644 devdocs/docker/index create mode 100644 devdocs/docker/index.html create mode 100644 devdocs/docker/machine%2Findex.html create mode 100644 devdocs/docker/metadata (limited to 'devdocs/docker') diff --git a/devdocs/docker/compose%2Fcli-command-compatibility%2Findex.html b/devdocs/docker/compose%2Fcli-command-compatibility%2Findex.html new file mode 100644 index 00000000..202dd1a9 --- /dev/null +++ b/devdocs/docker/compose%2Fcli-command-compatibility%2Findex.html @@ -0,0 +1,37 @@ +

Compose command compatibility with docker-compose


The compose command in the Docker CLI supports most of the docker-compose commands and flags. It is expected to be a drop-in replacement for docker-compose.

If you see any Compose functionality that is not available in the compose command, create an issue in the Compose GitHub repository, so we can prioritize it.

Commands or flags not yet implemented

The following commands have not been implemented yet, and may be implemented at a later time. Let us know if these commands are a higher priority for your use cases.

compose build --memory: This option is not yet supported by BuildKit. The flag is currently accepted, but is hidden to avoid breaking existing Compose usage, and has no effect.
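For instance, the flag is still accepted for compatibility but silently ignored; the invocation below is only a hypothetical illustration of the behaviour described above:

$ docker compose build --memory 2g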

Flags that will not be implemented

The list below includes the flags that we are not planning to support in Compose in the Docker CLI, either because they are already deprecated in docker-compose, or because they are not relevant for Compose in the Docker CLI.

Global flags:

Config command

The config command shows the configuration that Docker Compose will actually use to run the project. Some parts of the Compose file, such as the ports entry, have both a short and a long format. In the example below, the config command expands the ports section:

docker-compose.yml:

services:
+  web:
+    image: nginx
+    ports:
+      - 80:80
+

With $ docker compose config the output turns into:

services:
+  web:
+    image: nginx
+    networks:
+      default: null
+    ports:
+    - mode: ingress
+      target: 80
+      published: 80
+      protocol: tcp
+networks:
+  default:
+    name: workspace_default
+

The result above is the fully expanded configuration that Docker Compose will use to run the project.

New commands introduced in Compose v2

Copy

The cp command is intended to copy files or folders between service containers and the local filesystem.
The command is bidirectional: you can copy either from or to the service containers.

Copy a file from a service container to the local filesystem:

$ docker compose cp my-service:~/path/to/myfile ~/local/path/to/copied/file
+

We can also copy from the local filesystem to all the running containers of a service:

$ docker compose cp --all ~/local/path/to/source/file my-service:~/path/to/copied/file
+

List

The ls command is intended to list the Compose projects. By default, it only lists running projects; flags can be used to also display stopped projects, to filter by conditions, and to change the output to JSON format, for example.

$ docker compose ls --all --format json
+[{"Name":"dockergithubio","Status":"exited(1)","ConfigFiles":"/path/to/docker.github.io/docker-compose.yml"}]
+

Use --project-name with Compose commands

With the GA version of Compose, you can run some commands using only the project name:

Once a Compose project has been loaded, you can use the -p or --project-name flag to reference it:

$ docker compose -p my-loaded-project restart my-service
+

This option works with the start, stop, restart and down commands.
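For example, reusing the project name from above, the same project can later be stopped or removed without pointing at its Compose file (service and project names are just the ones from the previous example):

$ docker compose -p my-loaded-project stop my-service
$ docker compose -p my-loaded-project down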

diff --git a/devdocs/docker/compose%2Fcompose-file%2Fbuild%2Findex.html b/devdocs/docker/compose%2Fcompose-file%2Fbuild%2Findex.html new file mode 100644 index 00000000..43a38b63 --- /dev/null +++ b/devdocs/docker/compose%2Fcompose-file%2Fbuild%2Findex.html @@ -0,0 +1,97 @@ +

Compose file build reference


The Compose specification is a platform-neutral way to define multi-container applications. A Compose implementation focused on the development use case, running applications on a local machine, will obviously also support (re)building the application from source. The Compose Build specification lets you define the build process within a Compose file in a portable way.

Definitions

The Compose Specification is extended to support an OPTIONAL build subsection on services. This section defines the build requirements for the service's container image. Only a subset of the services in a Compose file MAY define such a build subsection; the others are created from their image attribute. When a build subsection is present for a service, it is valid for the Compose file to omit the image attribute for that service, as a Compose implementation can build the image from source.

Build can be either specified as a single string defining a context path, or as a detailed build definition.

In the former case, the whole path is used as the Docker context for a docker build, looking for a canonical Dockerfile at the context root. The context path can be absolute or relative; a relative path MUST be resolved from the Compose file's parent folder. As an absolute path prevents the Compose file from being portable, a Compose implementation SHOULD warn the user accordingly.

In the latter case, build arguments can be specified, including an alternate Dockerfile location, given as an absolute or relative path. If the Dockerfile path is relative, it MUST be resolved from the context path. As an absolute path prevents the Compose file from being portable, a Compose implementation SHOULD warn the user if an absolute alternate Dockerfile path is used.

Consistency with Image

When a service definition includes both an image attribute and a build section, a Compose implementation can't guarantee that a pulled image is strictly equivalent to building the same image from source. Without any explicit user directives, a Compose implementation with build support MUST first try to pull the image, then build from source if the image was not found on the registry. A Compose implementation MAY offer options to customize this behaviour by user request.
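A minimal sketch of such a service definition (service and image names are reused from the sample later on this page; the commented pull_policy line is an optional Compose specification attribute some implementations expose to customize this behaviour):

services:
  backend:
    image: awesome/database   # pulled from the registry when available
    build: ./backend          # built from source if the image is not found
    # pull_policy: build      # optional: always build instead of pulling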

Publishing built images

A Compose implementation with build support SHOULD offer an option to push built images to a registry. When doing so, it MUST NOT try to push service images that have no image attribute. The Compose implementation SHOULD warn the user about the missing image attribute, which prevents the image from being pushed.

A Compose implementation MAY offer a mechanism to compute an image attribute for a service when it is not explicitly declared in the YAML file. In such a case, the resulting Compose configuration is considered to have a valid image attribute, even though the raw YAML file doesn't explicitly declare one.

Illustrative sample

The following sample illustrates Compose specification concepts with a concrete sample application. The sample is non-normative.

services:
+  frontend:
+    image: awesome/webapp
+    build: ./webapp
+
+  backend:
+    image: awesome/database
+    build:
+      context: backend
+      dockerfile: ../backend.Dockerfile
+
+  custom:
+    build: ~/custom
+

When used to build service images from source, such a Compose file will create three Docker images: awesome/webapp (built from the ./webapp directory), awesome/database (built from the backend directory using ../backend.Dockerfile), and an image for the custom service (built from the ~/custom directory).

On push, both the awesome/webapp and awesome/database images are pushed to the (default) registry. The custom service image is skipped, as no image attribute is set, and the user is warned about this missing attribute.
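As a sketch of that workflow with the Compose v2 CLI (output omitted):

$ docker compose build    # builds awesome/webapp, awesome/database and the custom service image
$ docker compose push     # pushes awesome/webapp and awesome/database; custom is skipped (no image attribute)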

Build definition

The build element defines configuration options that are applied by Compose implementations to build a Docker image from source. build can be specified either as a string containing a path to the build context, or as a detailed structure:

services:
+  webapp:
+    build: ./dir
+

Using this string syntax, only the build context can be configured as a relative path to the Compose file’s parent folder. This path MUST be a directory and contain a Dockerfile.

Alternatively, build can be an object with fields defined as follows.

context (REQUIRED)

context defines either a path to a directory containing a Dockerfile, or a url to a git repository.

When the value supplied is a relative path, it MUST be interpreted as relative to the location of the Compose file. Compose implementations MUST warn the user when an absolute path is used to define the build context, as it prevents the Compose file from being portable.

build:
+  context: ./dir
+

dockerfile

dockerfile allows setting an alternate Dockerfile. A relative path MUST be resolved from the build context. Compose implementations MUST warn the user when an absolute path is used to define the Dockerfile, as it prevents the Compose file from being portable.

build:
+  context: .
+  dockerfile: webapp.Dockerfile
+

args

args define build arguments, i.e. Dockerfile ARG values.

Using the following Dockerfile:

ARG GIT_COMMIT
+RUN echo "Based on commit: $GIT_COMMIT"
+

args can be set in the Compose file under the build key to define GIT_COMMIT. args can be set as a mapping or a list:

build:
+  context: .
+  args:
+    GIT_COMMIT: cdc3b19
+
build:
+  context: .
+  args:
+    - GIT_COMMIT=cdc3b19
+

The value can be omitted when specifying a build argument, in which case its value at build time MUST be obtained by user interaction; otherwise, the build arg won't be set when building the Docker image.

args:
+  - GIT_COMMIT
+
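When the value is omitted like this, one common way to provide it at build time is on the command line; the flag below is the standard --build-arg option of docker compose build, and the value is only an example:

$ docker compose build --build-arg GIT_COMMIT=cdc3b19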

ssh

ssh defines the SSH authentications that the image builder SHOULD use during the image build (e.g., when cloning a private repository).

The ssh property accepts either the special value default, which mounts the default SSH agent, or an ID=path mapping that points to a local SSH key, as the samples below show.

Simple default sample

build:
+  context: .
+  ssh: 
+    - default   # mount the default ssh agent
+

or

build:
+  context: .
+  ssh: ["default"]   # mount the default ssh agent
+

Using a custom id myproject with path to a local SSH key:

build:
+  context: .
+  ssh: 
+    - myproject=~/.ssh/myproject.pem
+

The image builder can then rely on this to mount the SSH key during the build. For illustration, BuildKit's extended syntax can be used to mount the SSH key set by ID and access a secured resource:

RUN --mount=type=ssh,id=myproject git clone ...

cache_from

cache_from defines a list of sources the Image builder SHOULD use for cache resolution.

Cache location syntax MUST follow the global format [NAME|type=TYPE[,KEY=VALUE]]. Simple NAME is actually a shortcut notation for type=registry,ref=NAME.

Compose Builder implementations MAY support custom types; the Compose Specification defines canonical types which MUST be supported:

build:
+  context: .
+  cache_from:
+    - alpine:latest
+    - type=local,src=path/to/cache
+    - type=gha
+

Unsupported caches MUST be ignored and not prevent the user from building the image.

cache_to

cache_to defines a list of export locations to be used to share build cache with future builds.

build:
+  context: .
+  cache_to: 
+   - user/app:cache
+   - type=local,dest=path/to/cache
+

Cache target is defined using the same type=TYPE[,KEY=VALUE] syntax defined by cache_from.

Unsupported cache targets MUST be ignored and not prevent the user from building the image.

extra_hosts

extra_hosts adds hostname mappings at build time. Use the same syntax as the service-level extra_hosts option.

extra_hosts:
+  - "somehost:162.242.195.82"
+  - "otherhost:50.31.209.229"
+

Compose implementations MUST create a matching entry with the IP address and hostname in the container's network configuration, which means that on Linux /etc/hosts will get extra lines:

162.242.195.82  somehost
+50.31.209.229   otherhost
+

isolation

isolation specifies a build's container isolation technology. As with the service-level isolation option, supported values are platform-specific.
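For illustration only, a value such as process could be set here; the supported values mirror those of docker build --isolation and depend on the platform (on Windows: default, process, or hyperv):

build:
  context: .
  isolation: process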

labels

labels add metadata to the resulting image. labels can be set either as an array or a map.

Reverse-DNS notation SHOULD be used to prevent labels from conflicting with those used by other software.

build:
+  context: .
+  labels:
+    com.example.description: "Accounting webapp"
+    com.example.department: "Finance"
+    com.example.label-with-empty-value: ""
+
build:
+  context: .
+  labels:
+    - "com.example.description=Accounting webapp"
+    - "com.example.department=Finance"
+    - "com.example.label-with-empty-value"
+

shm_size

shm_size sets the size of the shared memory (the /dev/shm partition on Linux) allocated for building the Docker image. Specify it as an integer value representing the number of bytes, or as a string expressing a byte value.

build:
+  context: .
+  shm_size: '2gb'
+
build:
+  context: .
+  shm_size: 10000000
+

target

target defines the stage to build as defined inside a multi-stage Dockerfile.

build:
+  context: .
+  target: prod
+
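For context, a matching multi-stage Dockerfile might look like the sketch below; the base images and stage contents are assumptions, and only the prod stage name ties back to the target example above:

# syntax=docker/dockerfile:1
FROM node:18 AS build                 # assumed build stage
WORKDIR /app
COPY . .
RUN npm ci && npm run build

FROM nginx:alpine AS prod             # the stage selected by `target: prod`
COPY --from=build /app/dist /usr/share/nginx/html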

Implementations

diff --git a/devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v2%2Findex.html b/devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v2%2Findex.html new file mode 100644 index 00000000..affc1ef1 --- /dev/null +++ b/devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v2%2Findex.html @@ -0,0 +1,679 @@ +

Compose file version 2 reference


Reference and guidelines

These topics describe version 2 of the Compose file format.

Compose and Docker compatibility matrix

There are several versions of the Compose file format – 1, 2, 2.x, and 3.x. The table below is a quick look. For full details on what each version includes and how to upgrade, see About versions and upgrading.

This table shows which Compose file versions support specific Docker releases.

Compose file format      Docker Engine release
Compose specification    19.03.0+
3.8                      19.03.0+
3.7                      18.06.0+
3.6                      18.02.0+
3.5                      17.12.0+
3.4                      17.09.0+
3.3                      17.06.0+
3.2                      17.04.0+
3.1                      1.13.1+
3.0                      1.13.0+
2.4                      17.12.0+
2.3                      17.06.0+
2.2                      1.13.0+
2.1                      1.12.0+
2.0                      1.10.0+

In addition to the Compose file format versions shown in the table, Compose itself is on a release schedule, as shown in Compose releases, but file format versions do not necessarily increment with each release. For example, Compose file format 3.0 was first introduced in Compose release 1.10.0, and was versioned gradually in subsequent releases.

The latest Compose file format is defined by the Compose Specification and is implemented by Docker Compose 1.27.0+.

Service configuration reference

The Compose file is a YAML file defining services, networks and volumes. The default path for a Compose file is ./docker-compose.yml.

Tip: You can use either a .yml or .yaml extension for this file. They both work.

A service definition contains configuration that is applied to each container started for that service, much like passing command-line parameters to docker run. Likewise, network and volume definitions are analogous to docker network create and docker volume create.

As with docker run, options specified in the Dockerfile, such as CMD, EXPOSE, VOLUME, ENV, are respected by default - you don’t need to specify them again in docker-compose.yml.

You can use environment variables in configuration values with a Bash-like ${VARIABLE} syntax - see variable substitution for full details.

This section contains a list of all configuration options supported by a service definition in version 2.

blkio_config

A set of configuration options to set block IO limits for this service.

version: "2.4"
+services:
+  foo:
+    image: busybox
+    blkio_config:
+      weight: 300
+      weight_device:
+        - path: /dev/sda
+          weight: 400
+      device_read_bps:
+        - path: /dev/sdb
+          rate: '12mb'
+      device_read_iops:
+        - path: /dev/sdb
+          rate: 120
+      device_write_bps:
+        - path: /dev/sdb
+          rate: '1024k'
+      device_write_iops:
+        - path: /dev/sdb
+          rate: 30
+

device_read_bps, device_write_bps

Set a limit in bytes per second for read / write operations on a given device. Each item in the list must have two keys: path, defining the affected device, and rate, the limit expressed as a byte value.

device_read_iops, device_write_iops

Set a limit in operations per second for read / write operations on a given device. Each item in the list must have two keys: path, defining the affected device, and rate, the limit expressed as an integer number of operations per second.

weight

Modify the proportion of bandwidth allocated to this service relative to other services. Takes an integer value between 10 and 1000, with 500 being the default.

weight_device

Fine-tune bandwidth allocation by device. Each item in the list must have two keys: path, defining the affected device, and weight, an integer value between 10 and 1000.

build

Configuration options that are applied at build time.

build can be specified either as a string containing a path to the build context:

version: "2.4"
+services:
+  webapp:
+    build: ./dir
+

Or, as an object with the path specified under context and optionally Dockerfile and args:

version: "2.4"
+services:
+  webapp:
+    build:
+      context: ./dir
+      dockerfile: Dockerfile-alternate
+      args:
+        buildno: 1
+

If you specify image as well as build, then Compose names the built image with the name and optional tag specified in image:

build: ./dir
+image: webapp:tag
+

This results in an image named webapp and tagged tag, built from ./dir.

context

Added in version 2.0 file format.

Either a path to a directory containing a Dockerfile, or a url to a git repository.

When the value supplied is a relative path, it is interpreted as relative to the location of the Compose file. This directory is also the build context that is sent to the Docker daemon.

Compose builds and tags it with a generated name, and uses that image thereafter.

build:
+  context: ./dir
+

dockerfile

Alternate Dockerfile.

Compose uses an alternate file to build with. A build path must also be specified.

build:
+  context: .
+  dockerfile: Dockerfile-alternate
+

args

Added in version 2.0 file format.

Add build arguments, which are environment variables accessible only during the build process.

First, specify the arguments in your Dockerfile:

# syntax=docker/dockerfile:1
+
+ARG buildno
+ARG gitcommithash
+
+RUN echo "Build number: $buildno"
+RUN echo "Based on commit: $gitcommithash"
+

Then specify the arguments under the build key. You can pass a mapping or a list:

build:
+  context: .
+  args:
+    buildno: 1
+    gitcommithash: cdc3b19
+
build:
+  context: .
+  args:
+    - buildno=1
+    - gitcommithash=cdc3b19
+

Scope of build-args

In your Dockerfile, if you specify ARG before the FROM instruction, ARG is not available in the build instructions under FROM. If you need an argument to be available in both places, also specify it under the FROM instruction. Refer to the understand how ARG and FROM interact section in the documentation for usage details.

You can omit the value when specifying a build argument, in which case its value at build time is the value in the environment where Compose is running.

args:
+  - buildno
+  - gitcommithash
+

Tip when using boolean values

YAML boolean values ("true", "false", "yes", "no", "on", "off") must be enclosed in quotes, so that the parser interprets them as strings.

cache_from

Added in version 2.2 file format

A list of images that the engine uses for cache resolution.

build:
+  context: .
+  cache_from:
+    - alpine:latest
+    - corp/web_app:3.14
+

extra_hosts

Add hostname mappings at build-time. Use the same values as the docker client --add-host parameter.

extra_hosts:
+  - "somehost:162.242.195.82"
+  - "otherhost:50.31.209.229"
+

An entry with the IP address and hostname is created in /etc/hosts inside containers for this build, for example:

162.242.195.82  somehost
+50.31.209.229   otherhost
+

isolation

Added in version 2.1 file format.

Specify a build’s container isolation technology. On Linux, the only supported value is default. On Windows, acceptable values are default, process and hyperv. Refer to the Docker Engine docs for details.

If unspecified, Compose will use the isolation value found in the service’s definition to determine the value to use for builds.
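
A sketch of that fallback, assuming a Windows daemon (where process and hyperv are valid values); if the build-level key were omitted, the service-level hyperv value would also apply to the build:

version: "2.4"
+services:
+  webapp:
+    isolation: hyperv
+    build:
+      context: .
+      isolation: process
+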

labels

Added in version 2.1 file format

Add metadata to the resulting image using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

build:
+  context: .
+  labels:
+    com.example.description: "Accounting webapp"
+    com.example.department: "Finance"
+    com.example.label-with-empty-value: ""
+
build:
+  context: .
+  labels:
+    - "com.example.description=Accounting webapp"
+    - "com.example.department=Finance"
+    - "com.example.label-with-empty-value"
+

network

Added in version 2.2 file format

Set the network containers connect to for the RUN instructions during build.

build:
+  context: .
+  network: host
+
build:
+  context: .
+  network: custom_network_1
+

Use none to disable networking during build:

build:
+  context: .
+  network: none
+

shm_size

Added in version 2.3 file format

Set the size of the /dev/shm partition for this build’s containers. Specify as an integer value representing the number of bytes or as a string expressing a byte value.

build:
+  context: .
+  shm_size: '2gb'
+
build:
+  context: .
+  shm_size: 10000000
+

target

Added in version 2.3 file format

Build the specified stage as defined inside the Dockerfile. See the multi-stage build docs for details.

build:
+  context: .
+  target: prod
+

cap_add, cap_drop

Add or drop container capabilities. See man 7 capabilities for a full list.

cap_add:
+  - ALL
+
+cap_drop:
+  - NET_ADMIN
+  - SYS_ADMIN
+

cgroup_parent

Specify an optional parent cgroup for the container.

cgroup_parent: m-executor-abcd
+

command

Override the default command.

command: bundle exec thin -p 3000
+

The command can also be a list, in a manner similar to the Dockerfile exec form:

command: ["bundle", "exec", "thin", "-p", "3000"]
+

container_name

Specify a custom container name, rather than a generated default name.

container_name: my-web-container
+

Because Docker container names must be unique, you cannot scale a service beyond 1 container if you have specified a custom name. Attempting to do so results in an error.

cpu_rt_runtime, cpu_rt_period

Added in version 2.2 file format

Configure CPU allocation parameters using the Docker daemon realtime scheduler.

cpu_rt_runtime: '400ms'
+cpu_rt_period: '1400us'
+

Integer values will use microseconds as units:

cpu_rt_runtime: 95000
+cpu_rt_period: 11000
+

device_cgroup_rules

Added in version 2.3 file format.

Add rules to the cgroup allowed devices list.

device_cgroup_rules:
+  - 'c 1:3 mr'
+  - 'a 7:* rmw'
+

devices

List of device mappings. Uses the same format as the --device docker client create option.

devices:
+  - "/dev/ttyUSB0:/dev/ttyUSB0"
+

depends_on

Added in version 2.0 file format.

Express dependency between services. Service dependencies cause the following behaviors:

docker-compose up starts services in dependency order. In the following example, db and redis are started before web.
docker-compose up SERVICE automatically includes SERVICE’s dependencies. In the following example, docker-compose up web also creates and starts db and redis.
docker-compose stop stops services in dependency order. In the following example, web is stopped before db and redis.

Simple example:

version: "2.4"
+services:
+  web:
+    build: .
+    depends_on:
+      - db
+      - redis
+  redis:
+    image: redis
+  db:
+    image: postgres
+

Note

depends_on does not wait for db and redis to be “ready” before starting web - only until they have been started. If you need to wait for a service to be ready, see Controlling startup order for more on this problem and strategies for solving it.

Added in version 2.1 file format.

A healthcheck indicates that you want a dependency to wait for another container to be “healthy” (as indicated by a successful state from the healthcheck) before starting.

Example:

version: "2.4"
+services:
+  web:
+    build: .
+    depends_on:
+      db:
+        condition: service_healthy
+      redis:
+        condition: service_started
+  redis:
+    image: redis
+  db:
+    image: postgres
+    healthcheck:
+      test: "exit 0"
+

In the above example, Compose waits for the redis service to be started (legacy behavior) and the db service to be healthy before starting web.

See the healthcheck section for complementary information.

dns

Custom DNS servers. Can be a single value or a list.

dns: 8.8.8.8
+
dns:
+  - 8.8.8.8
+  - 9.9.9.9
+

dns_opt

List of custom DNS options to be added to the container’s resolv.conf file.

dns_opt:
+  - use-vc
+  - no-tld-query
+

dns_search

Custom DNS search domains. Can be a single value or a list.

dns_search: example.com
+
dns_search:
+  - dc1.example.com
+  - dc2.example.com
+

entrypoint

Override the default entrypoint.

entrypoint: /code/entrypoint.sh
+

The entrypoint can also be a list, in a manner similar to the Dockerfile exec form:

entrypoint: ["php", "-d", "memory_limit=-1", "vendor/bin/phpunit"]
+

Note

Setting entrypoint both overrides any default entrypoint set on the service’s image with the ENTRYPOINT Dockerfile instruction, and clears out any default command on the image - meaning that if there’s a CMD instruction in the Dockerfile, it is ignored.

env_file

Add environment variables from a file. Can be a single value or a list.

If you have specified a Compose file with docker-compose -f FILE, paths in env_file are relative to the directory that file is in.

Environment variables declared in the environment section override these values – this holds true even if those values are empty or undefined.

env_file: .env
+
env_file:
+  - ./common.env
+  - ./apps/web.env
+  - /opt/runtime_opts.env
+

Compose expects each line in an env file to be in VAR=VAL format. Lines beginning with # are treated as comments and are ignored. Blank lines are also ignored.

# Set Rails/Rack environment
+RACK_ENV=development
+

Note

If your service specifies a build option, variables defined in environment files are not automatically visible during the build. Use the args sub-option of build to define build-time environment variables.

The value of VAL is used as is and not modified at all. For example if the value is surrounded by quotes (as is often the case of shell variables), the quotes are included in the value passed to Compose.

Keep in mind that the order of files in the list is significant in determining the value assigned to a variable that shows up more than once. The files in the list are processed from the top down. If the same variable is specified in file a.env and assigned a different value in file b.env, and b.env is listed below (after) a.env, then the value from b.env stands. For example, given the following declaration in docker-compose.yml:

services:
+  some-service:
+    env_file:
+      - a.env
+      - b.env
+

And the following files:

# a.env
+VAR=1
+

and

# b.env
+VAR=hello
+

$VAR is hello.

environment

Add environment variables. You can use either an array or a dictionary. Any boolean values (true, false, yes, no) need to be enclosed in quotes to ensure they are not converted to True or False by the YML parser.

Environment variables with only a key are resolved to their values on the machine Compose is running on, which can be helpful for secret or host-specific values.

environment:
+  RACK_ENV: development
+  SHOW: 'true'
+  SESSION_SECRET:
+
environment:
+  - RACK_ENV=development
+  - SHOW=true
+  - SESSION_SECRET
+

Note

If your service specifies a build option, variables defined in environment are not automatically visible during the build. Use the args sub-option of build to define build-time environment variables.

expose

Expose ports without publishing them to the host machine - they’ll only be accessible to linked services. Only the internal port can be specified.

expose:
+  - "3000"
+  - "8000"
+

extends

Extend another service, in the current file or another, optionally overriding configuration.

You can use extends on any service together with other configuration keys. The extends value must be a dictionary defined with a required service and an optional file key.

extends:
+  file: common.yml
+  service: webapp
+

The service is the name of the service being extended, for example web or database. The file is the location of a Compose configuration file defining that service.

If you omit the file key, Compose looks for the service configuration in the current file. The file value can be an absolute or relative path. If you specify a relative path, Compose treats it as relative to the location of the current file.
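
For example, a sketch that omits the file key, so Compose resolves webapp from the current file:

extends:
+  service: webapp
+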

You can extend a service that itself extends another. You can extend indefinitely. Compose does not support circular references and docker-compose returns an error if it encounters one.

For more on extends, see the extends documentation.

external_links

Link to containers started outside this docker-compose.yml or even outside of Compose, especially for containers that provide shared or common services. external_links follow semantics similar to the legacy option links when specifying both the container name and the link alias (CONTAINER:ALIAS).

external_links:
+  - redis_1
+  - project_db_1:mysql
+  - project_db_1:postgresql
+

Note

If you’re using the version 2 or above file format, the externally-created containers must be connected to at least one of the same networks as the service that is linking to them. Links are a legacy option. We recommend using networks instead.

extra_hosts

Add hostname mappings. Use the same values as the docker client --add-host parameter.

extra_hosts:
+  - "somehost:162.242.195.82"
+  - "otherhost:50.31.209.229"
+

An entry with the IP address and hostname is created in /etc/hosts inside containers for this service, for example:

162.242.195.82  somehost
+50.31.209.229   otherhost
+

group_add

Specify additional groups (by name or number) which the user inside the container should be a member of. Groups must exist in both the container and the host system to be added. An example of where this is useful is when multiple containers (running as different users) need to all read or write the same file on the host system. That file can be owned by a group shared by all the containers, and specified in group_add. See the Docker documentation for more details.

A full example:

version: "2.4"
+services:
+  myservice:
+    image: alpine
+    group_add:
+      - mail
+

Running id inside the created container shows that the user belongs to the mail group, which would not have been the case if group_add were not used.

healthcheck

Added in version 2.1 file format.

Configure a check that’s run to determine whether or not containers for this service are “healthy”. See the docs for the HEALTHCHECK Dockerfile instruction for details on how healthchecks work.

healthcheck:
+  test: ["CMD", "curl", "-f", "http://localhost"]
+  interval: 1m30s
+  timeout: 10s
+  retries: 3
+  start_period: 40s
+

interval, timeout and start_period are specified as durations.

Added in version 2.3 file format.

The start_period option was added in file format 2.3.

test must be either a string or a list. If it’s a list, the first item must be either NONE, CMD or CMD-SHELL. If it’s a string, it’s equivalent to specifying CMD-SHELL followed by that string.

# Hit the local web app
+test: ["CMD", "curl", "-f", "http://localhost"]
+

As above, but wrapped in /bin/sh. Both forms below are equivalent.

test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
+
test: curl -f https://localhost || exit 1
+

To disable any default healthcheck set by the image, you can use disable: true. This is equivalent to specifying test: ["NONE"].

healthcheck:
+  disable: true
+

image

Specify the image to start the container from. Can either be a repository/tag or a partial image ID.

image: redis
+
image: ubuntu:18.04
+
image: tutum/influxdb
+
image: example-registry.com:4000/postgresql
+
image: a4bc65fd
+

If the image does not exist, Compose attempts to pull it, unless you have also specified build, in which case it builds it using the specified options and tags it with the specified tag.

init

Added in version 2.2 file format.

Run an init inside the container that forwards signals and reaps processes. Set this option to true to enable this feature for the service.

version: "2.4"
+services:
+  web:
+    image: alpine:latest
+    init: true
+

The default init binary that is used is Tini, and is installed in /usr/libexec/docker-init on the daemon host. You can configure the daemon to use a custom init binary through the init-path configuration option.

isolation

Added in version 2.1 file format.

Specify a container’s isolation technology. On Linux, the only supported value is default. On Windows, acceptable values are default, process and hyperv. Refer to the Docker Engine docs for details.
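
A one-line sketch, assuming a Windows daemon:

isolation: process
+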

labels

Add metadata to containers using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Accounting webapp"
+  com.example.department: "Finance"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Accounting webapp"
+  - "com.example.department=Finance"
+  - "com.example.label-with-empty-value"
+

links

Link to containers in another service. Either specify both the service name and a link alias ("SERVICE:ALIAS"), or just the service name.

Links are a legacy option. We recommend using networks instead.

web:
+  links:
+    - "db"
+    - "db:database"
+    - "redis"
+

Containers for the linked service are reachable at a hostname identical to the alias, or the service name if no alias was specified.

Links are not required to enable services to communicate - by default, any service can reach any other service at that service’s name. (See also, the Links topic in Networking in Compose.)

Links also express dependency between services in the same way as depends_on, so they determine the order of service startup.

Note

If you define both links and networks, services with links between them must share at least one network in common to communicate. We recommend using networks instead.

logging

Logging configuration for the service.

logging:
+  driver: syslog
+  options:
+    syslog-address: "tcp://192.168.0.42:123"
+

The driver name specifies a logging driver for the service’s containers, as with the --log-driver option for docker run (documented here).

The default value is json-file.

driver: "json-file"
+
driver: "syslog"
+
driver: "none"
+

Note

Only the json-file and journald drivers make the logs available directly from docker-compose up and docker-compose logs. Using any other driver does not print any logs.

Specify logging options for the logging driver with the options key, as with the --log-opt option for docker run.

Logging options are key-value pairs. An example of syslog options:

driver: "syslog"
+options:
+  syslog-address: "tcp://192.168.0.42:123"
+

network_mode

Changed in version 2 file format.

Network mode. Use the same values as the docker client --network parameter, plus the special form service:[service name].

network_mode: "bridge"
+
network_mode: "host"
+
network_mode: "none"
+
network_mode: "service:[service name]"
+
network_mode: "container:[container name/id]"
+

networks

Changed in version 2 file format.

Networks to join, referencing entries under the top-level networks key.

services:
+  some-service:
+    networks:
+     - some-network
+     - other-network
+

aliases

Aliases (alternative hostnames) for this service on the network. Other containers on the same network can use either the service name or this alias to connect to one of the service’s containers.

Since aliases is network-scoped, the same service can have different aliases on different networks.

Note

A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name resolves to is not guaranteed.

The general format is shown here.

services:
+  some-service:
+    networks:
+      some-network:
+        aliases:
+          - alias1
+          - alias3
+      other-network:
+        aliases:
+          - alias2
+

In the example below, three services are provided (web, worker, and db), along with two networks (new and legacy). The db service is reachable at the hostname db or database on the new network, and at db or mysql on the legacy network.

version: "2.4"
+
+services:
+  web:
+    image: "nginx:alpine"
+    networks:
+      - new
+
+  worker:
+    image: "my-worker-image:latest"
+    networks:
+      - legacy
+
+  db:
+    image: mysql
+    networks:
+      new:
+        aliases:
+          - database
+      legacy:
+        aliases:
+          - mysql
+
+networks:
+  new:
+  legacy:
+

ipv4_address, ipv6_address

Specify a static IP address for containers for this service when joining the network.

The corresponding network configuration in the top-level networks section must have an ipam block with subnet and gateway configurations covering each static address.

If IPv6 addressing is desired, the enable_ipv6 option must be set.

An example:

version: "2.4"
+
+services:
+  app:
+    image: busybox
+    command: ifconfig
+    networks:
+      app_net:
+        ipv4_address: 172.16.238.10
+        ipv6_address: 2001:3984:3989::10
+
+networks:
+  app_net:
+    driver: bridge
+    enable_ipv6: true
+    ipam:
+      driver: default
+      config:
+        - subnet: 172.16.238.0/24
+          gateway: 172.16.238.1
+        - subnet: 2001:3984:3989::/64
+          gateway: 2001:3984:3989::1
+

link_local_ips

Added in version 2.1 file format.

Specify a list of link-local IPs. Link-local IPs are special IPs which belong to a well-known subnet and are purely managed by the operator, usually dependent on the architecture where they are deployed. Therefore they are not managed by Docker (the IPAM driver).

Example usage:

version: "2.4"
+services:
+  app:
+    image: busybox
+    command: top
+    networks:
+      app_net:
+        link_local_ips:
+          - 57.123.22.11
+          - 57.123.22.13
+networks:
+  app_net:
+    driver: bridge
+

priority

Specify a priority to indicate in which order Compose should connect the service’s containers to its networks. If unspecified, the default value is 0.

In the following example, the app service connects to app_net_1 first as it has the highest priority. It then connects to app_net_3, then app_net_2, which uses the default priority value of 0.

version: "2.4"
+services:
+  app:
+    image: busybox
+    command: top
+    networks:
+      app_net_1:
+        priority: 1000
+      app_net_2:
+
+      app_net_3:
+        priority: 100
+networks:
+  app_net_1:
+  app_net_2:
+  app_net_3:
+

Note

If multiple networks have the same priority, the connection order is undefined.

pid

pid: "host"
+
pid: "container:custom_container_1"
+
pid: "service:foobar"
+

If set to one of the following forms: container:<container_name>, service:<service_name>, the service shares the PID address space of the designated container or service.

If set to “host”, the service uses the host’s PID mode. This turns on sharing of the PID address space between the container and the host operating system. Containers launched with this flag can access and manipulate other containers in the bare-metal machine’s namespace and vice versa.

Added in version 2.1 file format.

The service: and container: forms require version 2.1 or above.

pids_limit

Added in version 2.1 file format.

Tunes a container’s PIDs limit. Set to -1 for unlimited PIDs.

pids_limit: 10
+

platform

Added in version 2.4 file format.

Target platform containers for this service will run on, using the os[/arch[/variant]] syntax, e.g.

platform: osx
+
platform: windows/amd64
+
platform: linux/arm64/v8
+

This parameter determines which version of the image will be pulled and/or on which platform the service’s build will be performed.

ports

Expose ports. Either specify both ports (HOST:CONTAINER), or just the container port (an ephemeral host port is chosen).

Note

When mapping ports in the HOST:CONTAINER format, you may experience erroneous results when using a container port lower than 60, because YAML parses numbers in the format xx:yy as a base-60 value. For this reason, we recommend always explicitly specifying your port mappings as strings.

ports:
+  - "3000"
+  - "3000-3005"
+  - "8000:8000"
+  - "9090-9091:8080-8081"
+  - "49100:22"
+  - "127.0.0.1:8001:8001"
+  - "127.0.0.1:5000-5010:5000-5010"
+  - "6060:6060/udp"
+  - "12400-12500:1240"
+

runtime

Added in version 2.3 file format.

Specify which runtime to use for the service’s containers. Default runtime and available runtimes are listed in the output of docker info.

web:
+  image: busybox:latest
+  command: true
+  runtime: runc
+

scale

Added in version 2.2 file format.

Specify the default number of containers to deploy for this service. Whenever you run docker-compose up, Compose creates or removes containers to match the specified number. This value can be overridden using the --scale flag.

web:
+  image: busybox:latest
+  command: echo 'scaled'
+  scale: 3
+

security_opt

Override the default labeling scheme for each container.

security_opt:
+  - label:user:USER
+  - label:role:ROLE
+

stop_grace_period

Specify how long to wait when attempting to stop a container if it doesn’t handle SIGTERM (or whatever stop signal has been specified with stop_signal), before sending SIGKILL. Specified as a duration.

stop_grace_period: 1s
+
stop_grace_period: 1m30s
+

By default, stop waits 10 seconds for the container to exit before sending SIGKILL.

stop_signal

Sets an alternative signal to stop the container. By default stop uses SIGTERM. Setting an alternative signal using stop_signal causes stop to send that signal instead.

stop_signal: SIGUSR1
+

storage_opt

Added in version 2.1 file format.

Set storage driver options for this service.

storage_opt:
+  size: '1G'
+

sysctls

Added in version 2.1 file format.

Kernel parameters to set in the container. You can use either an array or a dictionary.

sysctls:
+  net.core.somaxconn: 1024
+  net.ipv4.tcp_syncookies: 0
+
sysctls:
+  - net.core.somaxconn=1024
+  - net.ipv4.tcp_syncookies=0
+

tmpfs

Mount a temporary file system inside the container. Can be a single value or a list.

tmpfs: /run
+
tmpfs:
+  - /run
+  - /tmp
+

ulimits

Override the default ulimits for a container. You can either specify a single limit as an integer or soft/hard limits as a mapping.

ulimits:
+  nproc: 65535
+  nofile:
+    soft: 20000
+    hard: 40000
+

userns_mode

Added in version 2.1 file format.

userns_mode: "host"
+

Disables the user namespace for this service, if the Docker daemon is configured with user namespaces. See dockerd for more information.

volumes

Mount host paths or named volumes. Named volumes need to be specified with the top-level volumes key.

Short syntax

The short syntax uses the generic [SOURCE:]TARGET[:MODE] format, where SOURCE can be either a host path or volume name. TARGET is the container path where the volume is mounted. Standard modes are ro for read-only and rw for read-write (default).

You can mount a relative path on the host, which expands relative to the directory of the Compose configuration file being used. Relative paths should always begin with . or ...

volumes:
+  # Just specify a path and let the Engine create a volume
+  - /var/lib/mysql
+
+  # Specify an absolute path mapping
+  - /opt/data:/var/lib/mysql
+
+  # Path on the host, relative to the Compose file
+  - ./cache:/tmp/cache
+
+  # User-relative path
+  - ~/configs:/etc/configs/:ro
+
+  # Named volume
+  - datavolume:/var/lib/mysql
+

Long syntax

Added in version 2.3 file format.

The long form syntax allows the configuration of additional fields that can’t be expressed in the short form.

version: "2.4"
+services:
+  web:
+    image: nginx:alpine
+    ports:
+      - "80:80"
+    volumes:
+      - type: volume
+        source: mydata
+        target: /data
+        volume:
+          nocopy: true
+      - type: bind
+        source: ./static
+        target: /opt/app/static
+
+networks:
+  webnet:
+
+volumes:
+  mydata:
+

Note

When creating bind mounts, using the long syntax requires the referenced folder to be created beforehand. Using the short syntax creates the folder on the fly if it doesn’t exist. See the bind mounts documentation for more information.

volume_driver

Specify a default volume driver to be used for all declared volumes on this service.

volume_driver: mydriver
+

Note

In version 2 files, this option only applies to anonymous volumes (those specified in the image, or specified under volumes without an explicit named volume or host path). To configure the driver for a named volume, use the driver key under the entry in the top-level volumes option.
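
A sketch of the named-volume alternative; mydriver is a placeholder driver name:

volumes:
+  data:
+    driver: mydriver
+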

See Docker Volumes and Volume Plugins for more information.

volumes_from

Mount all of the volumes from another service or container, optionally specifying read-only access (ro) or read-write (rw). If no access level is specified, then read-write is used.

volumes_from:
+  - service_name
+  - service_name:ro
+  - container:container_name
+  - container:container_name:rw
+

Changed in version 2 file format.

restart

no is the default restart policy, and it does not restart a container under any circumstances. When always is specified, the container always restarts. The on-failure policy restarts a container if the exit code indicates an on-failure error. unless-stopped always restarts a container, except when the container is stopped (manually or otherwise).

restart: "no"
+
restart: "always"
+
restart: "on-failure"
+
restart: "unless-stopped"
+

cpu_count, cpu_percent, cpu_shares, cpu_period, cpu_quota, cpus, cpuset, domainname, hostname, ipc, mac_address, mem_limit, memswap_limit, mem_swappiness, mem_reservation, oom_kill_disable, oom_score_adj, privileged, read_only, shm_size, stdin_open, tty, user, working_dir

Each of these is a single value, analogous to its docker run counterpart.

Added in version 2.2 file format.

The cpu_count, cpu_percent, and cpus options were added in version 2.2.

Added in version 2.1 file format.

The oom_kill_disable and cpu_period options were added in version 2.1.

cpu_count: 2
+cpu_percent: 50
+cpus: 0.5
+cpu_shares: 73
+cpu_quota: 50000
+cpu_period: 20ms
+cpuset: 0,1
+
+user: postgresql
+working_dir: /code
+
+domainname: foo.com
+hostname: foo
+ipc: host
+mac_address: 02:42:ac:11:65:43
+
+mem_limit: 1000000000
+memswap_limit: 2000000000
+mem_reservation: 512m
+privileged: true
+
+oom_score_adj: 500
+oom_kill_disable: true
+
+read_only: true
+shm_size: 64M
+stdin_open: true
+tty: true
+

Specifying durations

Some configuration options, such as the interval and timeout sub-options for healthcheck, accept a duration as a string in a format that looks like this:

2.5s
+10s
+1m30s
+2h32m
+5h34m56s
+

The supported units are us, ms, s, m and h.

Specifying byte values

Some configuration options, such as the device_read_bps sub-option for blkio_config, accept a byte value as a string in a format that looks like this:

2b
+1024kb
+2048k
+300m
+1gb
+

The supported units are b, k, m and g, and their alternative notation kb, mb and gb. Decimal values are not supported at this time.

Volume configuration reference

While it is possible to declare volumes on the fly as part of the service declaration, this section allows you to create named volumes that can be reused across multiple services (without relying on volumes_from), and are easily retrieved and inspected using the docker command line or API. See the docker volume subcommand documentation for more information.

See use volumes and volume plugins for general information on volumes.

Here’s an example of a two-service setup where a database’s data directory is shared with another service as a volume so that it can be periodically backed up:

version: "2.4"
+
+services:
+  db:
+    image: db
+    volumes:
+      - data-volume:/var/lib/db
+  backup:
+    image: backup-service
+    volumes:
+      - data-volume:/var/lib/backup/data
+
+volumes:
+  data-volume:
+

An entry under the top-level volumes key can be empty, in which case it uses the default driver configured by the Engine (in most cases, this is the local driver). Optionally, you can configure it with the following keys:

driver

Specify which volume driver should be used for this volume. Defaults to whatever driver the Docker Engine has been configured to use, which in most cases is local. If the driver is not available, the Engine returns an error when docker-compose up tries to create the volume.

driver: foobar
+

driver_opts

Specify a list of options as key-value pairs to pass to the driver for this volume. Those options are driver-dependent - consult the driver’s documentation for more information. Optional.

volumes:
+  example:
+    driver_opts:
+      type: "nfs"
+      o: "addr=10.40.0.199,nolock,soft,rw"
+      device: ":/docker/example"
+

external

If set to true, specifies that this volume has been created outside of Compose. docker-compose up does not attempt to create it, and raises an error if it doesn’t exist.

For version 2.0 of the format, external cannot be used in conjunction with other volume configuration keys (driver, driver_opts, labels). This limitation no longer exists for version 2.1 and above.

In the example below, instead of attempting to create a volume called [projectname]_data, Compose looks for an existing volume simply called data and mounts it into the db service’s containers.

version: "2.4"
+
+services:
+  db:
+    image: postgres
+    volumes:
+      - data:/var/lib/postgresql/data
+
+volumes:
+  data:
+    external: true
+

You can also specify the name of the volume separately from the name used to refer to it within the Compose file:

volumes:
+  data:
+    external:
+      name: actual-name-of-volume
+

Deprecated in version 2.1 file format.

external.name was deprecated in version 2.1 file format; use name instead.

labels

Added in version 2.1 file format.

Add metadata to volumes using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Database volume"
+  com.example.department: "IT/Ops"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Database volume"
+  - "com.example.department=IT/Ops"
+  - "com.example.label-with-empty-value"
+

name

Added in version 2.1 file format.

Set a custom name for this volume. The name field can be used to reference volumes that contain special characters. The name is used as is and will not be scoped with the stack name.

version: "2.4"
+volumes:
+  data:
+    name: my-app-data
+

It can also be used in conjunction with the external property:

version: "2.4"
+volumes:
+  data:
+    external: true
+    name: my-app-data
+

Network configuration reference

The top-level networks key lets you specify networks to be created. For a full explanation of Compose’s use of Docker networking features, see the Networking guide.

driver

Specify which driver should be used for this network.

The default driver depends on how the Docker Engine you’re using is configured, but in most instances it is bridge on a single host and overlay on a Swarm.

The Docker Engine returns an error if the driver is not available.

driver: overlay
+

Changed in version 2.1 file format.

Starting with Compose file format 2.1, overlay networks are always created as attachable, and this is not configurable. This means that standalone containers can connect to overlay networks.

driver_opts

Specify a list of options as key-value pairs to pass to the driver for this network. Those options are driver-dependent - consult the driver’s documentation for more information. Optional.

driver_opts:
+  foo: "bar"
+  baz: 1
+

enable_ipv6

Added in version 2.1 file format.

Enable IPv6 networking on this network.
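
A minimal sketch; the network name is illustrative:

networks:
+  app_net:
+    driver: bridge
+    enable_ipv6: true
+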

ipam

Specify custom IPAM config. This is an object with several properties, each of which is optional: driver (a custom IPAM driver, instead of the default), config (a list with zero or more config blocks, each of which can set the subnet, ip_range, gateway, and aux_addresses shown in the example below) and options (driver-specific options as a key-value mapping).

A full example:

ipam:
+  driver: default
+  config:
+    - subnet: 172.28.0.0/16
+      ip_range: 172.28.5.0/24
+      gateway: 172.28.5.254
+      aux_addresses:
+        host1: 172.28.1.5
+        host2: 172.28.1.6
+        host3: 172.28.1.7
+  options:
+    foo: bar
+    baz: "0"
+

internal

By default, Docker also connects a bridge network to it to provide external connectivity. If you want to create an externally isolated overlay network, you can set this option to true.
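
For example, a sketch with an illustrative network name:

networks:
+  backend:
+    internal: true
+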

labels

Added in version 2.1 file format.

Add metadata to networks using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Financial transaction network"
+  com.example.department: "Finance"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Financial transaction network"
+  - "com.example.department=Finance"
+  - "com.example.label-with-empty-value"
+

external

If set to true, specifies that this network has been created outside of Compose. docker-compose up does not attempt to create it, and raises an error if it doesn’t exist.

For version 2.0 of the format, external cannot be used in conjunction with other network configuration keys (driver, driver_opts, ipam, internal). This limitation no longer exists for version 2.1 and above.

In the example below, proxy is the gateway to the outside world. Instead of attempting to create a network called [projectname]_outside, Compose looks for an existing network simply called outside and connects the proxy service’s containers to it.

version: "2.4"
+
+services:
+  proxy:
+    build: ./proxy
+    networks:
+      - outside
+      - default
+  app:
+    build: ./app
+    networks:
+      - default
+
+networks:
+  outside:
+    external: true
+

You can also specify the name of the network separately from the name used to refer to it within the Compose file:

version: "2.4"
+networks:
+  outside:
+    external:
+      name: actual-name-of-network
+

Not supported for version 2 docker-compose files. Use network_mode instead.

name

Added in version 2.1 file format.

Set a custom name for this network. The name field can be used to reference networks which contain special characters. The name is used as is and will not be scoped with the stack name.

version: "2.4"
+networks:
+  network1:
+    name: my-app-net
+

It can also be used in conjunction with the external property:

version: "2.4"
+networks:
+  network1:
+    external: true
+    name: my-app-net
+

Variable substitution

Your configuration options can contain environment variables. Compose uses the variable values from the shell environment in which docker-compose is run. For example, suppose the shell contains POSTGRES_VERSION=9.3 and you supply this configuration:

db:
+  image: "postgres:${POSTGRES_VERSION}"
+

When you run docker-compose up with this configuration, Compose looks for the POSTGRES_VERSION environment variable in the shell and substitutes its value in. For this example, Compose resolves the image to postgres:9.3 before running the configuration.

If an environment variable is not set, Compose substitutes with an empty string. In the example above, if POSTGRES_VERSION is not set, the value for the image option is postgres:.

You can set default values for environment variables using a .env file, which Compose automatically looks for in the project directory (the parent folder of your Compose file). Values set in the shell environment override those set in the .env file.
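
For instance, a minimal .env sketch (the value is illustrative) that provides a default for the POSTGRES_VERSION example above:

# .env
+POSTGRES_VERSION=9.3
+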

Note when using docker stack deploy

The .env file feature only works when you use the docker-compose up command and does not work with docker stack deploy.

Both $VARIABLE and ${VARIABLE} syntax are supported. Additionally, when using the 2.1 file format, it is possible to provide inline default values using typical shell syntax:

${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment.
${VARIABLE-default} evaluates to default only if VARIABLE is unset in the environment.

Similarly, the following syntax allows you to specify mandatory variables:

${VARIABLE:?err} exits with an error message containing err if VARIABLE is unset or empty in the environment.
${VARIABLE?err} exits with an error message containing err if VARIABLE is unset in the environment.

Other extended shell-style features, such as ${VARIABLE/foo/bar}, are not supported.
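
For instance, a sketch using the inline default form described above; the fallback tag is illustrative:

db:
+  image: "postgres:${POSTGRES_VERSION:-9.3}"
+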

You can use a $$ (double-dollar sign) when your configuration needs a literal dollar sign. This also prevents Compose from interpolating a value, so a $$ allows you to refer to environment variables that you don’t want processed by Compose.

web:
+  build: .
+  command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE"
+

If you forget and use a single dollar sign ($), Compose interprets the value as an environment variable and warns you:

The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string.
+

Extension fields

Added in version 2.1 file format.

It is possible to re-use configuration fragments using extension fields. Those special fields can be of any format as long as they are located at the root of your Compose file and their names start with the x- character sequence.

Note

Starting with the 3.7 format (for the 3.x series) and 2.4 format (for the 2.x series), extension fields are also allowed at the root of service, volume, network, config and secret definitions.

version: "3.9"
+x-custom:
+  items:
+    - a
+    - b
+  options:
+    max-size: '12m'
+  name: "custom"
+

The contents of those fields are ignored by Compose, but they can be inserted in your resource definitions using YAML anchors. For example, if you want several of your services to use the same logging configuration:

logging:
+  options:
+    max-size: '12m'
+    max-file: '5'
+  driver: json-file
+

You may write your Compose file as follows:

version: "3.9"
+x-logging:
+  &default-logging
+  options:
+    max-size: '12m'
+    max-file: '5'
+  driver: json-file
+
+services:
+  web:
+    image: myapp/web:latest
+    logging: *default-logging
+  db:
+    image: mysql:latest
+    logging: *default-logging
+

It is also possible to partially override values in extension fields using the YAML merge type. For example:

version: "3.9"
+x-volumes:
+  &default-volume
+  driver: foobar-storage
+
+services:
+  web:
+    image: myapp/web:latest
+    volumes: ["vol1", "vol2", "vol3"]
+volumes:
+  vol1: *default-volume
+  vol2:
+    << : *default-volume
+    name: volume02
+  vol3:
+    << : *default-volume
+    driver: default
+    name: volume-local
+


https://docs.docker.com/compose/compose-file/compose-file-v2/

+
diff --git a/devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v3%2Findex.html b/devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v3%2Findex.html new file mode 100644 index 00000000..de60a581 --- /dev/null +++ b/devdocs/docker/compose%2Fcompose-file%2Fcompose-file-v3%2Findex.html @@ -0,0 +1,996 @@ +

Compose file version 3 reference

+ +

Reference and guidelines

These topics describe version 3 of the Compose file format. This is the newest version.

Compose and Docker compatibility matrix

There are several versions of the Compose file format – 1, 2, 2.x, and 3.x. The table below is a quick look. For full details on what each version includes and how to upgrade, see About versions and upgrading.

This table shows which Compose file versions support specific Docker releases.

Compose file format Docker Engine release
Compose specification 19.03.0+
3.8 19.03.0+
3.7 18.06.0+
3.6 18.02.0+
3.5 17.12.0+
3.4 17.09.0+
3.3 17.06.0+
3.2 17.04.0+
3.1 1.13.1+
3.0 1.13.0+
2.4 17.12.0+
2.3 17.06.0+
2.2 1.13.0+
2.1 1.12.0+
2.0 1.10.0+

In addition to the Compose file format versions shown in the table, Compose itself is on a release schedule, as shown in Compose releases, but file format versions do not necessarily increment with each release. For example, Compose file format 3.0 was first introduced in Compose release 1.10.0, and versioned gradually in subsequent releases.

The latest Compose file format is defined by the Compose Specification and is implemented by Docker Compose 1.27.0+.

Compose file structure and examples

Here is a sample Compose file from the voting app sample used in the Docker for Beginners lab topic on Deploying an app to a Swarm:

+version: "3.9"
+services:
+
+  redis:
+    image: redis:alpine
+    ports:
+      - "6379"
+    networks:
+      - frontend
+    deploy:
+      replicas: 2
+      update_config:
+        parallelism: 2
+        delay: 10s
+      restart_policy:
+        condition: on-failure
+
+  db:
+    image: postgres:9.4
+    volumes:
+      - db-data:/var/lib/postgresql/data
+    networks:
+      - backend
+    deploy:
+      placement:
+        max_replicas_per_node: 1
+        constraints:
+          - "node.role==manager"
+
+  vote:
+    image: dockersamples/examplevotingapp_vote:before
+    ports:
+      - "5000:80"
+    networks:
+      - frontend
+    depends_on:
+      - redis
+    deploy:
+      replicas: 2
+      update_config:
+        parallelism: 2
+      restart_policy:
+        condition: on-failure
+
+  result:
+    image: dockersamples/examplevotingapp_result:before
+    ports:
+      - "5001:80"
+    networks:
+      - backend
+    depends_on:
+      - db
+    deploy:
+      replicas: 1
+      update_config:
+        parallelism: 2
+        delay: 10s
+      restart_policy:
+        condition: on-failure
+
+  worker:
+    image: dockersamples/examplevotingapp_worker
+    networks:
+      - frontend
+      - backend
+    deploy:
+      mode: replicated
+      replicas: 1
+      labels: [APP=VOTING]
+      restart_policy:
+        condition: on-failure
+        delay: 10s
+        max_attempts: 3
+        window: 120s
+      placement:
+        constraints:
+          - "node.role==manager"
+
+  visualizer:
+    image: dockersamples/visualizer:stable
+    ports:
+      - "8080:8080"
+    stop_grace_period: 1m30s
+    volumes:
+      - "/var/run/docker.sock:/var/run/docker.sock"
+    deploy:
+      placement:
+        constraints:
+          - "node.role==manager"
+
+networks:
+  frontend:
+  backend:
+
+volumes:
+  db-data:
+

The topics on this reference page are organized alphabetically by top-level key to reflect the structure of the Compose file itself. Top-level keys that define a section in the configuration file such as build, deploy, depends_on, networks, and so on, are listed with the options that support them as sub-topics. This maps to the <key>: <option>: <value> indent structure of the Compose file.

Service configuration reference

The Compose file is a YAML file defining services, networks and volumes. The default path for a Compose file is ./docker-compose.yml.

Tip: You can use either a .yml or .yaml extension for this file. They both work.

A service definition contains configuration that is applied to each container started for that service, much like passing command-line parameters to docker run. Likewise, network and volume definitions are analogous to docker network create and docker volume create.

As with docker run, options specified in the Dockerfile, such as CMD, EXPOSE, VOLUME, ENV, are respected by default - you don’t need to specify them again in docker-compose.yml.

You can use environment variables in configuration values with a Bash-like ${VARIABLE} syntax - see variable substitution for full details.

This section contains a list of all configuration options supported by a service definition in version 3.

build

Configuration options that are applied at build time.

build can be specified either as a string containing a path to the build context:

version: "3.9"
+services:
+  webapp:
+    build: ./dir
+

Or, as an object with the path specified under context and optionally Dockerfile and args:

version: "3.9"
+services:
+  webapp:
+    build:
+      context: ./dir
+      dockerfile: Dockerfile-alternate
+      args:
+        buildno: 1
+

If you specify image as well as build, then Compose names the built image with the name and optional tag specified in image:

build: ./dir
+image: webapp:tag
+

This results in an image named webapp and tagged tag, built from ./dir.

Note when using docker stack deploy

The build option is ignored when deploying a stack in swarm mode. The docker stack command does not build images before deploying.

context

Either a path to a directory containing a Dockerfile, or a url to a git repository.

When the value supplied is a relative path, it is interpreted as relative to the location of the Compose file. This directory is also the build context that is sent to the Docker daemon.

Compose builds and tags it with a generated name, and uses that image thereafter.

build:
+  context: ./dir
+

dockerfile

Alternate Dockerfile.

Compose uses an alternate file to build with. A build path must also be specified.

build:
+  context: .
+  dockerfile: Dockerfile-alternate
+

args

Add build arguments, which are environment variables accessible only during the build process.

First, specify the arguments in your Dockerfile:

# syntax=docker/dockerfile:1
+
+ARG buildno
+ARG gitcommithash
+
+RUN echo "Build number: $buildno"
+RUN echo "Based on commit: $gitcommithash"
+

Then specify the arguments under the build key. You can pass a mapping or a list:

build:
+  context: .
+  args:
+    buildno: 1
+    gitcommithash: cdc3b19
+
build:
+  context: .
+  args:
+    - buildno=1
+    - gitcommithash=cdc3b19
+

Scope of build-args

In your Dockerfile, if you specify ARG before the FROM instruction, ARG is not available in the build instructions under FROM. If you need an argument to be available in both places, also specify it under the FROM instruction. Refer to the understand how ARG and FROM interact section in the documentation for usage details.

You can omit the value when specifying a build argument, in which case its value at build time is the value in the environment where Compose is running.

args:
+  - buildno
+  - gitcommithash
+

Tip when using boolean values

YAML boolean values ("true", "false", "yes", "no", "on", "off") must be enclosed in quotes, so that the parser interprets them as strings.

cache_from

Added in version 3.2 file format

A list of images that the engine uses for cache resolution.

build:
+  context: .
+  cache_from:
+    - alpine:latest
+    - corp/web_app:3.14
+

labels

Added in version 3.3 file format

Add metadata to the resulting image using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

build:
+  context: .
+  labels:
+    com.example.description: "Accounting webapp"
+    com.example.department: "Finance"
+    com.example.label-with-empty-value: ""
+
build:
+  context: .
+  labels:
+    - "com.example.description=Accounting webapp"
+    - "com.example.department=Finance"
+    - "com.example.label-with-empty-value"
+

network

Added in version 3.4 file format

Set the network containers connect to for the RUN instructions during build.

build:
+  context: .
+  network: host
+
build:
+  context: .
+  network: custom_network_1
+

Use none to disable networking during build:

build:
+  context: .
+  network: none
+

shm_size

Added in version 3.5 file format

Set the size of the /dev/shm partition for this build’s containers. Specify as an integer value representing the number of bytes or as a string expressing a byte value.

build:
+  context: .
+  shm_size: '2gb'
+
build:
+  context: .
+  shm_size: 10000000
+

target

Added in version 3.4 file format

Build the specified stage as defined inside the Dockerfile. See the multi-stage build docs for details.

build:
+  context: .
+  target: prod
+

cap_add, cap_drop

Add or drop container capabilities. See man 7 capabilities for a full list.

cap_add:
+  - ALL
+
+cap_drop:
+  - NET_ADMIN
+  - SYS_ADMIN
+

Note when using docker stack deploy

The cap_add and cap_drop options are ignored when deploying a stack in swarm mode.

cgroup_parent

Specify an optional parent cgroup for the container.

cgroup_parent: m-executor-abcd
+

Note when using docker stack deploy

The cgroup_parent option is ignored when deploying a stack in swarm mode.

command

Override the default command.

command: bundle exec thin -p 3000
+

The command can also be a list, in a manner similar to the Dockerfile exec form:

command: ["bundle", "exec", "thin", "-p", "3000"]
+

configs

Grant access to configs on a per-service basis using the per-service configs configuration. Two different syntax variants are supported.

Note: The config must already exist or be defined in the top-level configs configuration of this stack file, or stack deployment fails.

For more information on configs, see configs.

Short syntax

The short syntax variant only specifies the config name. This grants the container access to the config and mounts it at /<config_name> within the container. The source name and destination mountpoint are both set to the config name.

The following example uses the short syntax to grant the redis service access to the my_config and my_other_config configs. The value of my_config is set to the contents of the file ./my_config.txt, and my_other_config is defined as an external resource, which means that it has already been defined in Docker, either by running the docker config create command or by another stack deployment. If the external config does not exist, the stack deployment fails with a config not found error.

Added in version 3.3 file format.

config definitions are only supported in version 3.3 and higher of the compose file format.

version: "3.9"
+services:
+  redis:
+    image: redis:latest
+    deploy:
+      replicas: 1
+    configs:
+      - my_config
+      - my_other_config
+configs:
+  my_config:
+    file: ./my_config.txt
+  my_other_config:
+    external: true
+

Long syntax

The long syntax provides more granularity in how the config is created within the service’s task containers.

The following example sets the name of my_config to redis_config within the container, sets the mode to 0440 (group-readable) and sets the user and group to 103. The redis service does not have access to the my_other_config config.

version: "3.9"
+services:
+  redis:
+    image: redis:latest
+    deploy:
+      replicas: 1
+    configs:
+      - source: my_config
+        target: /redis_config
+        uid: '103'
+        gid: '103'
+        mode: 0440
+configs:
+  my_config:
+    file: ./my_config.txt
+  my_other_config:
+    external: true
+

You can grant a service access to multiple configs and you can mix long and short syntax. Defining a config does not imply granting a service access to it.
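
A sketch that mixes both syntaxes; the target path and mode shown for my_other_config are illustrative:

version: "3.9"
+services:
+  redis:
+    image: redis:latest
+    deploy:
+      replicas: 1
+    configs:
+      - my_config
+      - source: my_other_config
+        target: /etc/redis/other.conf
+        mode: 0440
+configs:
+  my_config:
+    file: ./my_config.txt
+  my_other_config:
+    external: true
+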

container_name

Specify a custom container name, rather than a generated default name.

container_name: my-web-container
+

Because Docker container names must be unique, you cannot scale a service beyond 1 container if you have specified a custom name. Attempting to do so results in an error.

Note when using docker stack deploy

The container_name option is ignored when deploying a stack in swarm mode.

credential_spec

Added in version 3.3 file format.

The credential_spec option was added in v3.3. Using group Managed Service Account (gMSA) configurations with compose files is supported in file format version 3.8 or up.

Configure the credential spec for a managed service account. This option is only used for services using Windows containers. The credential_spec must be in the format file://<filename> or registry://<value-name>.

When using file:, the referenced file must be present in the CredentialSpecs subdirectory in the Docker data directory, which defaults to C:\ProgramData\Docker\ on Windows. The following example loads the credential spec from a file named C:\ProgramData\Docker\CredentialSpecs\my-credential-spec.json.

credential_spec:
+  file: my-credential-spec.json
+

When using registry:, the credential spec is read from the Windows registry on the daemon’s host. A registry value with the given name must be located in:

HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs
+

The following example loads the credential spec from a value named my-credential-spec in the registry:

credential_spec:
+  registry: my-credential-spec
+

Example gMSA configuration

When configuring a gMSA credential spec for a service, you only need to specify a credential spec with config, as shown in the following example:

version: "3.9"
+services:
+  myservice:
+    image: myimage:latest
+    credential_spec:
+      config: my_credential_spec
+
+configs:
+  my_credential_spec:
+    file: ./my-credential-spec.json
+

depends_on

Express dependency between services. Service dependencies cause the following behaviors:

Simple example:

version: "3.9"
+services:
+  web:
+    build: .
+    depends_on:
+      - db
+      - redis
+  redis:
+    image: redis
+  db:
+    image: postgres
+

There are several things to be aware of when using depends_on:

deploy

Added in version 3 file format.

Specify configuration related to the deployment and running of services. The following
sub-options only take effect when deploying to a swarm with docker stack deploy, and are ignored by docker-compose up and docker-compose run, except for resources.

version: "3.9"
+services:
+  redis:
+    image: redis:alpine
+    deploy:
+      replicas: 6
+      placement:
+        max_replicas_per_node: 1
+      update_config:
+        parallelism: 2
+        delay: 10s
+      restart_policy:
+        condition: on-failure
+

Several sub-options are available:

endpoint_mode

Added in version 3.2 file format.

Specify a service discovery method for external clients connecting to a swarm.

version: "3.9"
+
+services:
+  wordpress:
+    image: wordpress
+    ports:
+      - "8080:80"
+    networks:
+      - overlay
+    deploy:
+      mode: replicated
+      replicas: 2
+      endpoint_mode: vip
+
+  mysql:
+    image: mysql
+    volumes:
+       - db-data:/var/lib/mysql/data
+    networks:
+       - overlay
+    deploy:
+      mode: replicated
+      replicas: 2
+      endpoint_mode: dnsrr
+
+volumes:
+  db-data:
+
+networks:
+  overlay:
+

The options for endpoint_mode also work as flags on the swarm mode CLI command docker service create. For a quick list of all swarm related docker commands, see Swarm mode CLI commands.

To learn more about service discovery and networking in swarm mode, see Configure service discovery in the swarm mode topics.

labels

Specify labels for the service. These labels are only set on the service, and not on any containers for the service.

version: "3.9"
+services:
+  web:
+    image: web
+    deploy:
+      labels:
+        com.example.description: "This label will appear on the web service"
+

To set labels on containers instead, use the labels key outside of deploy:

version: "3.9"
+services:
+  web:
+    image: web
+    labels:
+      com.example.description: "This label will appear on all containers for the web service"
+

mode

Either global (exactly one container per swarm node) or replicated (a specified number of containers). The default is replicated. (To learn more, see Replicated and global services in the swarm topics.)

version: "3.9"
+services:
+  worker:
+    image: dockersamples/examplevotingapp_worker
+    deploy:
+      mode: global
+

placement

Specify placement constraints and preferences. See the docker service create documentation for a full description of the syntax and available types of constraints and preferences, as well as how to specify the maximum replicas per node.

version: "3.9"
+services:
+  db:
+    image: postgres
+    deploy:
+      placement:
+        constraints:
+          - "node.role==manager"
+          - "engine.labels.operatingsystem==ubuntu 18.04"
+        preferences:
+          - spread: node.labels.zone
+

max_replicas_per_node

Added in version 3.8 file format.

If the service is replicated (which is the default), limit the number of replicas that can run on a node at any time.

When there are more tasks requested than running nodes, an error no suitable node (max replicas per node limit exceed) is raised.

version: "3.9"
+services:
+  worker:
+    image: dockersamples/examplevotingapp_worker
+    networks:
+      - frontend
+      - backend
+    deploy:
+      mode: replicated
+      replicas: 6
+      placement:
+        max_replicas_per_node: 1
+

replicas

If the service is replicated (which is the default), specify the number of containers that should be running at any given time.

version: "3.9"
+services:
+  worker:
+    image: dockersamples/examplevotingapp_worker
+    networks:
+      - frontend
+      - backend
+    deploy:
+      mode: replicated
+      replicas: 6
+

resources

Configures resource constraints.

Changed in compose-file version 3

The resources section replaces the older resource constraint options in Compose files prior to version 3 (cpu_shares, cpu_quota, cpuset, mem_limit, memswap_limit, mem_swappiness). Refer to Upgrading version 2.x to 3.x to learn about differences between version 2 and 3 of the compose-file format.

Each of these is a single value, analogous to its docker service create counterpart.

In this general example, the redis service is constrained to use no more than 50M of memory and 0.50 (50% of a single core) of available processing time (CPU), and has 20M of memory and 0.25 CPU time reserved (as always available to it).

version: "3.9"
+services:
+  redis:
+    image: redis:alpine
+    deploy:
+      resources:
+        limits:
+          cpus: '0.50'
+          memory: 50M
+        reservations:
+          cpus: '0.25'
+          memory: 20M
+

The topics below describe available options to set resource constraints on services or containers in a swarm.

Looking for options to set resources on non swarm mode containers?

The options described here are specific to the deploy key and swarm mode. If you want to set resource constraints on non swarm deployments, use Compose file format version 2 CPU, memory, and other resource options. If you have further questions, refer to the discussion on the GitHub issue docker/compose/4513.

Out Of Memory Exceptions (OOME)

If your services or containers attempt to use more memory than the system has available, you may experience an Out Of Memory Exception (OOME) and a container, or the Docker daemon, might be killed by the kernel OOM killer. To prevent this from happening, ensure that your application runs on hosts with adequate memory and see Understand the risks of running out of memory.

restart_policy

Configures if and how to restart containers when they exit. Replaces restart.

version: "3.9"
+services:
+  redis:
+    image: redis:alpine
+    deploy:
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+        window: 120s
+

rollback_config

Added in version 3.7 file format.

Configures how the service should be rolled back in case of a failing update.
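
A minimal sketch of what this can look like; the sub-options shown (parallelism, delay, failure_action, order) mirror those of update_config, and the values are illustrative:

version: "3.9"
services:
  vote:
    image: dockersamples/examplevotingapp_vote:before
    deploy:
      replicas: 2
      rollback_config:
        parallelism: 2
        delay: 10s
        failure_action: pause
        order: stop-first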

update_config

Configures how the service should be updated. Useful for configuring rolling updates.

Added in version 3.4 file format.

The order option is only supported by v3.4 and higher of the compose file format.

version: "3.9"
+services:
+  vote:
+    image: dockersamples/examplevotingapp_vote:before
+    depends_on:
+      - redis
+    deploy:
+      replicas: 2
+      update_config:
+        parallelism: 2
+        delay: 10s
+        order: stop-first
+

Not supported for docker stack deploy

The following sub-options (supported for docker-compose up and docker-compose run) are not supported for docker stack deploy or the deploy key.

Tip

See the section on how to configure volumes for services, swarms, and docker-stack.yml files. Volumes are supported but to work with swarms and services, they must be configured as named volumes or associated with services that are constrained to nodes with access to the requisite volumes.

devices

List of device mappings. Uses the same format as the --device docker client create option.

devices:
+  - "/dev/ttyUSB0:/dev/ttyUSB0"
+

Note when using docker stack deploy

The devices option is ignored when deploying a stack in swarm mode

dns

Custom DNS servers. Can be a single value or a list.

dns: 8.8.8.8
+
dns:
+  - 8.8.8.8
+  - 9.9.9.9
+

dns_search

Custom DNS search domains. Can be a single value or a list.

dns_search: example.com
+
dns_search:
+  - dc1.example.com
+  - dc2.example.com
+

entrypoint

Override the default entrypoint.

entrypoint: /code/entrypoint.sh
+

The entrypoint can also be a list, in a manner similar to dockerfile:

entrypoint: ["php", "-d", "memory_limit=-1", "vendor/bin/phpunit"]
+

Note

Setting entrypoint both overrides any default entrypoint set on the service’s image with the ENTRYPOINT Dockerfile instruction, and clears out any default command on the image - meaning that if there’s a CMD instruction in the Dockerfile, it is ignored.

env_file

Add environment variables from a file. Can be a single value or a list.

If you have specified a Compose file with docker-compose -f FILE, paths in env_file are relative to the directory that file is in.

Environment variables declared in the environment section override these values – this holds true even if those values are empty or undefined.

env_file: .env
+
env_file:
+  - ./common.env
+  - ./apps/web.env
+  - /opt/runtime_opts.env
+

Compose expects each line in an env file to be in VAR=VAL format. Lines beginning with # are treated as comments and are ignored. Blank lines are also ignored.

# Set Rails/Rack environment
+RACK_ENV=development
+

Compose also recognizes inline comments, like in:

MY_VAR = value # this is a comment
+

To avoid interpreting “#” as an inline comment, use quotation marks:

MY_VAR = "All the # inside are taken as part of the value"
+

Note

If your service specifies a build option, variables defined in environment files are not automatically visible during the build. Use the args sub-option of build to define build-time environment variables.

The value of VAL is used as is and not modified at all. For example, if the value is surrounded by quotes (as is often the case with shell variables), the quotes are included in the value passed to Compose.

Keep in mind that the order of files in the list is significant in determining the value assigned to a variable that shows up more than once. The files in the list are processed from the top down. For the same variable specified in file a.env and assigned a different value in file b.env, if b.env is listed below (after), then the value from b.env stands. For example, given the following declaration in docker-compose.yml:

services:
+  some-service:
+    env_file:
+      - a.env
+      - b.env
+

And the following files:

# a.env
+VAR=1
+

and

# b.env
+VAR=hello
+

$VAR is hello.

environment

Add environment variables. You can use either an array or a dictionary. Any boolean values (true, false, yes, no) need to be enclosed in quotes to ensure they are not converted to True or False by the YAML parser.

Environment variables with only a key are resolved to their values on the machine Compose is running on, which can be helpful for secret or host-specific values.

environment:
+  RACK_ENV: development
+  SHOW: 'true'
+  SESSION_SECRET:
+
environment:
+  - RACK_ENV=development
+  - SHOW=true
+  - SESSION_SECRET
+

Note

If your service specifies a build option, variables defined in environment are not automatically visible during the build. Use the args sub-option of build to define build-time environment variables.

expose

Expose ports without publishing them to the host machine - they’ll only be accessible to linked services. Only the internal port can be specified.

expose:
+  - "3000"
+  - "8000"
+

external_links

Link to containers started outside this docker-compose.yml or even outside of Compose, especially for containers that provide shared or common services. external_links follow semantics similar to the legacy option links when specifying both the container name and the link alias (CONTAINER:ALIAS).

external_links:
+  - redis_1
+  - project_db_1:mysql
+  - project_db_1:postgresql
+

Note

The externally-created containers must be connected to at least one of the same networks as the service that is linking to them. Links are a legacy option. We recommend using networks instead.

Note when using docker stack deploy

The external_links option is ignored when deploying a stack in swarm mode

extra_hosts

Add hostname mappings. Use the same values as the docker client --add-host parameter.

extra_hosts:
+  - "somehost:162.242.195.82"
+  - "otherhost:50.31.209.229"
+

An entry with the IP address and hostname is created in /etc/hosts inside containers for this service, e.g.:

162.242.195.82  somehost
+50.31.209.229   otherhost
+

healthcheck

Configure a check that’s run to determine whether or not containers for this service are “healthy”. See the docs for the HEALTHCHECK Dockerfile instruction for details on how healthchecks work.

healthcheck:
+  test: ["CMD", "curl", "-f", "http://localhost"]
+  interval: 1m30s
+  timeout: 10s
+  retries: 3
+  start_period: 40s
+

interval, timeout and start_period are specified as durations.

Added in version 3.4 file format.

The start_period option was added in file format 3.4.

test must be either a string or a list. If it’s a list, the first item must be either NONE, CMD or CMD-SHELL. If it’s a string, it’s equivalent to specifying CMD-SHELL followed by that string.

# Hit the local web app
+test: ["CMD", "curl", "-f", "http://localhost"]
+

As above, but wrapped in /bin/sh. Both forms below are equivalent.

test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
+
test: curl -f https://localhost || exit 1
+

To disable any default healthcheck set by the image, you can use disable: true. This is equivalent to specifying test: ["NONE"].

healthcheck:
+  disable: true
+

image

Specify the image to start the container from. Can either be a repository/tag or a partial image ID.

image: redis
+
image: ubuntu:18.04
+
image: tutum/influxdb
+
image: example-registry.com:4000/postgresql
+
image: a4bc65fd
+

If the image does not exist, Compose attempts to pull it, unless you have also specified build, in which case it builds it using the specified options and tags it with the specified tag.

init

Added in version 3.7 file format.

Run an init inside the container that forwards signals and reaps processes. Set this option to true to enable this feature for the service.

version: "3.9"
+services:
+  web:
+    image: alpine:latest
+    init: true
+

The default init binary that is used is Tini, and is installed in /usr/libexec/docker-init on the daemon host. You can configure the daemon to use a custom init binary through the init-path configuration option.
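
As a sketch, a daemon.json entry along these lines points the daemon at a custom init binary; the path shown is a placeholder:

{
  "init-path": "/usr/local/bin/my-custom-init"
}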

isolation

Specify a container’s isolation technology. On Linux, the only supported value is default. On Windows, acceptable values are default, process and hyperv. Refer to the Docker Engine docs for details.
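
For instance, a Windows service could request process isolation; this is a minimal sketch and the image name is a placeholder:

services:
  webapp:
    image: my-windows-image   # placeholder image name
    isolation: process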

labels

Add metadata to containers using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Accounting webapp"
+  com.example.department: "Finance"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Accounting webapp"
+  - "com.example.department=Finance"
+  - "com.example.label-with-empty-value"
+

links

Warning

The --link flag is a legacy feature of Docker. It may eventually be removed. Unless you absolutely need to continue using it, we recommend that you use user-defined networks to facilitate communication between two containers instead of using --link.

One feature that user-defined networks do not support that you can do with --link is sharing environment variables between containers. However, you can use other mechanisms such as volumes to share environment variables between containers in a more controlled way.

Link to containers in another service. Either specify both the service name and a link alias ("SERVICE:ALIAS"), or just the service name.

web:
+  links:
+    - "db"
+    - "db:database"
+    - "redis"
+

Containers for the linked service are reachable at a hostname identical to the alias, or the service name if no alias was specified.

Links are not required to enable services to communicate - by default, any service can reach any other service at that service’s name. (See also, the Links topic in Networking in Compose.)

Links also express dependency between services in the same way as depends_on, so they determine the order of service startup.

Note

If you define both links and networks, services with links between them must share at least one network in common to communicate.

Note when using docker stack deploy

The links option is ignored when deploying a stack in swarm mode

logging

Logging configuration for the service.

logging:
+  driver: syslog
+  options:
+    syslog-address: "tcp://192.168.0.42:123"
+

The driver name specifies a logging driver for the service’s containers, as with the --log-driver option for docker run (documented here).

The default value is json-file.

driver: "json-file"
+
driver: "syslog"
+
driver: "none"
+

Note

Only the json-file and journald drivers make the logs available directly from docker-compose up and docker-compose logs. Using any other driver does not print any logs.

Specify logging options for the logging driver with the options key, as with the --log-opt option for docker run.

Logging options are key-value pairs. An example of syslog options:

driver: "syslog"
+options:
+  syslog-address: "tcp://192.168.0.42:123"
+

The default driver, json-file, has options to limit the amount of logs stored. To do this, use a key-value pair for maximum storage size and maximum number of files:

options:
+  max-size: "200k"
+  max-file: "10"
+

The example shown above would store log files until they reach a max-size of 200kB, and then rotate them. The amount of individual log files stored is specified by the max-file value. As logs grow beyond the max limits, older log files are removed to allow storage of new logs.

Here is an example docker-compose.yml file that limits logging storage:

version: "3.9"
+services:
+  some-service:
+    image: some-service
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "200k"
+        max-file: "10"
+

Logging options available depend on which logging driver you use

The above example for controlling log files and sizes uses options specific to the json-file driver. These particular options are not available on other logging drivers. For a full list of supported logging drivers and their options, refer to the logging drivers documentation.

network_mode

Network mode. Use the same values as the docker client --network parameter, plus the special form service:[service name].

network_mode: "bridge"
+
network_mode: "host"
+
network_mode: "none"
+
network_mode: "service:[service name]"
+
network_mode: "container:[container name/id]"
+

Note

This option is ignored when deploying a stack in swarm mode.

networks

Networks to join, referencing entries under the top-level networks key.

services:
+  some-service:
+    networks:
+     - some-network
+     - other-network
+

aliases

Aliases (alternative hostnames) for this service on the network. Other containers on the same network can use either the service name or this alias to connect to one of the service’s containers.

Since aliases is network-scoped, the same service can have different aliases on different networks.

Note

A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name resolves to is not guaranteed.

The general format is shown here.

services:
+  some-service:
+    networks:
+      some-network:
+        aliases:
+          - alias1
+          - alias3
+      other-network:
+        aliases:
+          - alias2
+

In the example below, three services are provided (web, worker, and db), along with two networks (new and legacy). The db service is reachable at the hostname db or database on the new network, and at db or mysql on the legacy network.

version: "3.9"
+
+services:
+  web:
+    image: "nginx:alpine"
+    networks:
+      - new
+
+  worker:
+    image: "my-worker-image:latest"
+    networks:
+      - legacy
+
+  db:
+    image: mysql
+    networks:
+      new:
+        aliases:
+          - database
+      legacy:
+        aliases:
+          - mysql
+
+networks:
+  new:
+  legacy:
+

ipv4_address, ipv6_address

Specify a static IP address for containers for this service when joining the network.

The corresponding network configuration in the top-level networks section must have an ipam block with subnet configurations covering each static address.

If you’d like to use IPv6, you must first ensure that the Docker daemon is configured to support IPv6. See Enable IPv6 for detailed instructions. You can then access IPv6 addressing in a version 3.x Compose file by editing the /etc/docker/daemon.json to contain: {"ipv6": true, "fixed-cidr-v6": "2001:db8:1::/64"}

Then, reload the docker daemon and edit docker-compose.yml to contain the following under the service:

    sysctls:
+      - net.ipv6.conf.all.disable_ipv6=0
+

The enable_ipv6 option is only available in a version 2.x Compose file. IPv6 options do not currently work in swarm mode.

An example:

version: "3.9"
+
+services:
+  app:
+    image: nginx:alpine
+    networks:
+      app_net:
+        ipv4_address: 172.16.238.10
+        ipv6_address: 2001:3984:3989::10
+
+networks:
+  app_net:
+    ipam:
+      driver: default
+      config:
+        - subnet: "172.16.238.0/24"
+        - subnet: "2001:3984:3989::/64"
+

pid

pid: "host"
+

Sets the PID mode to the host PID mode. This turns on sharing of the PID address space between the container and the host operating system. Containers launched with this flag can access and manipulate other containers in the bare-metal machine’s namespace and vice versa.

ports

Expose ports.

Note

Port mapping is incompatible with network_mode: host

Note

docker-compose run ignores ports unless you include --service-ports.

Short syntax

There are three options:

Note

When mapping ports in the HOST:CONTAINER format, you may experience erroneous results when using a container port lower than 60, because YAML parses numbers in the format xx:yy as a base-60 value. For this reason, we recommend always explicitly specifying your port mappings as strings.

ports:
+  - "3000"
+  - "3000-3005"
+  - "8000:8000"
+  - "9090-9091:8080-8081"
+  - "49100:22"
+  - "127.0.0.1:8001:8001"
+  - "127.0.0.1:5000-5010:5000-5010"
+  - "127.0.0.1::5000"
+  - "6060:6060/udp"
+  - "12400-12500:1240"
+

Long syntax

The long form syntax allows the configuration of additional fields that can’t be expressed in the short form.

ports:
+  - target: 80
+    published: 8080
+    protocol: tcp
+    mode: host
+

Added in version 3.2 file format.

The long syntax is new in the v3.2 file format.

profiles

profiles: ["frontend", "debug"]
+profiles:
+  - frontend
+  - debug
+

profiles defines a list of named profiles for the service to be enabled under. When not set, the service is always enabled. For the services that make up your core application you should omit profiles so they will always be started.

Valid profile names follow the regex format [a-zA-Z0-9][a-zA-Z0-9_.-]+.

See also Using profiles with Compose to learn more about profiles.

restart

no is the default restart policy, and it does not restart a container under any circumstance. When always is specified, the container always restarts. The on-failure policy restarts a container if the exit code indicates an on-failure error. unless-stopped always restarts a container, except when the container is stopped (manually or otherwise).

restart: "no"
+restart: always
+restart: on-failure
+restart: unless-stopped
+

Note when using docker stack deploy

The restart option is ignored when deploying a stack in swarm mode.

secrets

Grant access to secrets on a per-service basis using the per-service secrets configuration. Two different syntax variants are supported.

Note when using docker stack deploy

The secret must already exist or be defined in the top-level secrets configuration of the compose file, or stack deployment fails.

For more information on secrets, see secrets.

Short syntax

The short syntax variant only specifies the secret name. This grants the container access to the secret and mounts it at /run/secrets/<secret_name> within the container. The source name and destination mountpoint are both set to the secret name.

The following example uses the short syntax to grant the redis service access to the my_secret and my_other_secret secrets. The value of my_secret is set to the contents of the file ./my_secret.txt, and my_other_secret is defined as an external resource, which means that it has already been defined in Docker, either by running the docker secret create command or by another stack deployment. If the external secret does not exist, the stack deployment fails with a secret not found error.

version: "3.9"
+services:
+  redis:
+    image: redis:latest
+    deploy:
+      replicas: 1
+    secrets:
+      - my_secret
+      - my_other_secret
+secrets:
+  my_secret:
+    file: ./my_secret.txt
+  my_other_secret:
+    external: true
+

Long syntax

The long syntax provides more granularity in how the secret is created within the service’s task containers.

The following example sets the name of my_secret to redis_secret within the container, sets the mode to 0440 (group-readable) and sets the user and group to 103. The redis service does not have access to the my_other_secret secret.

version: "3.9"
+services:
+  redis:
+    image: redis:latest
+    deploy:
+      replicas: 1
+    secrets:
+      - source: my_secret
+        target: redis_secret
+        uid: '103'
+        gid: '103'
+        mode: 0440
+secrets:
+  my_secret:
+    file: ./my_secret.txt
+  my_other_secret:
+    external: true
+

You can grant a service access to multiple secrets and you can mix long and short syntax. Defining a secret does not imply granting a service access to it.

security_opt

Override the default labeling scheme for each container.

security_opt:
+  - label:user:USER
+  - label:role:ROLE
+

Note when using docker stack deploy

The security_opt option is ignored when deploying a stack in swarm mode.

stop_grace_period

Specify how long to wait when attempting to stop a container if it doesn’t handle SIGTERM (or whatever stop signal has been specified with stop_signal), before sending SIGKILL. Specified as a duration.

stop_grace_period: 1s
+
stop_grace_period: 1m30s
+

By default, stop waits 10 seconds for the container to exit before sending SIGKILL.

stop_signal

Sets an alternative signal to stop the container. By default stop uses SIGTERM. Setting an alternative signal using stop_signal causes stop to send that signal instead.

stop_signal: SIGUSR1
+

sysctls

Kernel parameters to set in the container. You can use either an array or a dictionary.

sysctls:
+  net.core.somaxconn: 1024
+  net.ipv4.tcp_syncookies: 0
+
sysctls:
+  - net.core.somaxconn=1024
+  - net.ipv4.tcp_syncookies=0
+

You can only use sysctls that are namespaced in the kernel. Docker does not support changing sysctls inside a container that also modify the host system. For an overview of supported sysctls, refer to configure namespaced kernel parameters (sysctls) at runtime.

Note when using docker stack deploy

This option requires Docker Engine 19.03 or up when deploying a stack in swarm mode.

tmpfs

Added in version 3.6 file format.

Mount a temporary file system inside the container. Can be a single value or a list.

tmpfs: /run
+
tmpfs:
+  - /run
+  - /tmp
+

Note when using docker stack deploy

This option is ignored when deploying a stack in swarm mode with a (version 3-3.5) Compose file.

Mount a temporary file system inside the container. The size parameter specifies the size of the tmpfs mount in bytes. Unlimited by default.

- type: tmpfs
+  target: /app
+  tmpfs:
+    size: 1000
+

ulimits

Override the default ulimits for a container. You can either specify a single limit as an integer or soft/hard limits as a mapping.

ulimits:
+  nproc: 65535
+  nofile:
+    soft: 20000
+    hard: 40000
+

userns_mode

userns_mode: "host"
+

Disables the user namespace for this service, if the Docker daemon is configured with user namespaces. See dockerd for more information.

Note when using docker stack deploy

The userns_mode option is ignored when deploying a stack in swarm mode.

volumes

Mount host paths or named volumes, specified as sub-options to a service.

You can mount a host path as part of a definition for a single service, and there is no need to define it in the top level volumes key.

But, if you want to reuse a volume across multiple services, then define a named volume in the top-level volumes key. Use named volumes with services, swarms, and stack files.

Changed in version 3 file format.

The top-level volumes key defines a named volume and references it from each service’s volumes list. This replaces volumes_from in earlier versions of the Compose file format.

This example shows a named volume (mydata) being used by the web service, and a bind mount defined for a single service (first path under db service volumes). The db service also uses a named volume called dbdata (second path under db service volumes), but defines it using the old string format for mounting a named volume. Named volumes must be listed under the top-level volumes key, as shown.

version: "3.9"
+services:
+  web:
+    image: nginx:alpine
+    volumes:
+      - type: volume
+        source: mydata
+        target: /data
+        volume:
+          nocopy: true
+      - type: bind
+        source: ./static
+        target: /opt/app/static
+
+  db:
+    image: postgres:latest
+    volumes:
+      - "/var/run/postgres/postgres.sock:/var/run/postgres/postgres.sock"
+      - "dbdata:/var/lib/postgresql/data"
+
+volumes:
+  mydata:
+  dbdata:
+

Note

For general information on volumes, refer to the use volumes and volume plugins sections in the documentation.

Short syntax

The short syntax uses the generic [SOURCE:]TARGET[:MODE] format, where SOURCE can be either a host path or volume name. TARGET is the container path where the volume is mounted. Standard modes are ro for read-only and rw for read-write (default).

You can mount a relative path on the host, which expands relative to the directory of the Compose configuration file being used. Relative paths should always begin with . or .. (a single or double dot).

volumes:
+  # Just specify a path and let the Engine create a volume
+  - /var/lib/mysql
+
+  # Specify an absolute path mapping
+  - /opt/data:/var/lib/mysql
+
+  # Path on the host, relative to the Compose file
+  - ./cache:/tmp/cache
+
+  # User-relative path
+  - ~/configs:/etc/configs/:ro
+
+  # Named volume
+  - datavolume:/var/lib/mysql
+

Long syntax

Added in version 3.2 file format.

The long form syntax allows the configuration of additional fields that can’t be expressed in the short form.

version: "3.9"
+services:
+  web:
+    image: nginx:alpine
+    ports:
+      - "80:80"
+    volumes:
+      - type: volume
+        source: mydata
+        target: /data
+        volume:
+          nocopy: true
+      - type: bind
+        source: ./static
+        target: /opt/app/static
+
+networks:
+  webnet:
+
+volumes:
+  mydata:
+

Note

When creating bind mounts, using the long syntax requires the referenced folder to be created beforehand. Using the short syntax creates the folder on the fly if it doesn’t exist. See the bind mounts documentation for more information.

Volumes for services, swarms, and stack files

Note when using docker stack deploy

When working with services, swarms, and docker-stack.yml files, keep in mind that the tasks (containers) backing a service can be deployed on any node in a swarm, and this may be a different node each time the service is updated.

If you do not use named volumes with specified sources, Docker creates an anonymous volume for each task backing a service. Anonymous volumes do not persist after the associated containers are removed.

If you want your data to persist, use a named volume and a volume driver that is multi-host aware, so that the data is accessible from any node. Or, set constraints on the service so that its tasks are deployed on a node that has the volume present.

As an example, the docker-stack.yml file for the votingapp sample in Docker Labs defines a service called db that runs a postgres database. It is configured as a named volume to persist the data on the swarm, and is constrained to run only on manager nodes. Here is the relevant snippet from that file:

version: "3.9"
+services:
+  db:
+    image: postgres:9.4
+    volumes:
+      - db-data:/var/lib/postgresql/data
+    networks:
+      - backend
+    deploy:
+      placement:
+        constraints: [node.role == manager]
+

domainname, hostname, ipc, mac_address, privileged, read_only, shm_size, stdin_open, tty, user, working_dir

Each of these is a single value, analogous to its docker run counterpart. Note that mac_address is a legacy option.

user: postgresql
+working_dir: /code
+
+domainname: foo.com
+hostname: foo
+ipc: host
+mac_address: 02:42:ac:11:65:43
+
+privileged: true
+
+
+read_only: true
+shm_size: 64M
+stdin_open: true
+tty: true
+

Specifying durations

Some configuration options, such as the interval and timeout sub-options for healthcheck, accept a duration as a string in a format that looks like this:

2.5s
+10s
+1m30s
+2h32m
+5h34m56s
+

The supported units are us, ms, s, m and h.

Specifying byte values

Some configuration options, such as the shm_size sub-option for build, accept a byte value as a string in a format that looks like this:

2b
+1024kb
+2048k
+300m
+1gb
+

The supported units are b, k, m and g, and their alternative notation kb, mb and gb. Decimal values are not supported at this time.

Volume configuration reference

While it is possible to declare volumes on the fly as part of the service declaration, this section allows you to create named volumes that can be reused across multiple services (without relying on volumes_from), and are easily retrieved and inspected using the docker command line or API. See the docker volume subcommand documentation for more information.

See use volumes and volume plugins for general information on volumes.

Here’s an example of a two-service setup where a database’s data directory is shared with another service as a volume so that it can be periodically backed up:

version: "3.9"
+
+services:
+  db:
+    image: db
+    volumes:
+      - data-volume:/var/lib/db
+  backup:
+    image: backup-service
+    volumes:
+      - data-volume:/var/lib/backup/data
+
+volumes:
+  data-volume:
+

An entry under the top-level volumes key can be empty, in which case it uses the default driver configured by the Engine (in most cases, this is the local driver). Optionally, you can configure it with the following keys:

driver

Specify which volume driver should be used for this volume. Defaults to whatever driver the Docker Engine has been configured to use, which in most cases is local. If the driver is not available, the Engine returns an error when docker-compose up tries to create the volume.

driver: foobar
+

driver_opts

Specify a list of options as key-value pairs to pass to the driver for this volume. Those options are driver-dependent - consult the driver’s documentation for more information. Optional.

volumes:
+  example:
+    driver_opts:
+      type: "nfs"
+      o: "addr=10.40.0.199,nolock,soft,rw"
+      device: ":/docker/example"
+

external

If set to true, specifies that this volume has been created outside of Compose. docker-compose up does not attempt to create it, and raises an error if it doesn’t exist.

For version 3.3 and below of the format, external cannot be used in conjunction with other volume configuration keys (driver, driver_opts, labels). This limitation no longer exists for version 3.4 and above.

In the example below, instead of attempting to create a volume called [projectname]_data, Compose looks for an existing volume simply called data and mounts it into the db service’s containers.

version: "3.9"
+
+services:
+  db:
+    image: postgres
+    volumes:
+      - data:/var/lib/postgresql/data
+
+volumes:
+  data:
+    external: true
+

Deprecated in version 3.4 file format.

external.name was deprecated in the version 3.4 file format; use name instead.

You can also specify the name of the volume separately from the name used to refer to it within the Compose file:

volumes:
+  data:
+    external:
+      name: actual-name-of-volume
+

Note when using docker stack deploy

External volumes that do not exist are created if you use docker stack deploy to launch the app in swarm mode (instead of docker-compose up). In swarm mode, a volume is automatically created when it is defined by a service. As service tasks are scheduled on new nodes, swarmkit creates the volume on the local node. To learn more, see moby/moby#29976.

labels

Add metadata to volumes using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Database volume"
+  com.example.department: "IT/Ops"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Database volume"
+  - "com.example.department=IT/Ops"
+  - "com.example.label-with-empty-value"
+

name

Added in version 3.4 file format.

Set a custom name for this volume. The name field can be used to reference volumes that contain special characters. The name is used as is and will not be scoped with the stack name.

version: "3.9"
+volumes:
+  data:
+    name: my-app-data
+

It can also be used in conjunction with the external property:

version: "3.9"
+volumes:
+  data:
+    external: true
+    name: my-app-data
+

Network configuration reference

The top-level networks key lets you specify networks to be created.

driver

Specify which driver should be used for this network.

The default driver depends on how the Docker Engine you’re using is configured, but in most instances it is bridge on a single host and overlay on a Swarm.

The Docker Engine returns an error if the driver is not available.

driver: overlay
+

bridge

Docker defaults to using a bridge network on a single host. For examples of how to work with bridge networks, see the Docker Labs tutorial on Bridge networking.

overlay

The overlay driver creates a named network across multiple nodes in a swarm.

host or none

Use the host’s networking stack, or no networking. Equivalent to docker run --net=host or docker run --net=none. Only used if you use docker stack commands. If you use the docker-compose command, use network_mode instead.

If you want to use a particular network on a common build, use network under build, as shown in the second YAML example below.

The syntax for using built-in networks such as host and none is a little different. Define an external network with the name host or none (that Docker has already created automatically) and an alias that Compose can use (hostnet or nonet in the following examples), then grant the service access to that network using the alias.

version: "3.9"
+services:
+  web:
+    networks:
+      hostnet: {}
+
+networks:
+  hostnet:
+    external: true
+    name: host
+
services:
+  web:
+    ...
+    build:
+      ...
+      network: host
+      context: .
+      ...
+
services:
+  web:
+    ...
+    networks:
+      nonet: {}
+
+networks:
+  nonet:
+    external: true
+    name: none
+

driver_opts

Specify a list of options as key-value pairs to pass to the driver for this network. Those options are driver-dependent - consult the driver’s documentation for more information. Optional.

driver_opts:
+  foo: "bar"
+  baz: 1
+

attachable

Added in version 3.2 file format.

Only used when the driver is set to overlay. If set to true, then standalone containers can attach to this network, in addition to services. If a standalone container attaches to an overlay network, it can communicate with services and standalone containers that are also attached to the overlay network from other Docker daemons.

networks:
+  mynet1:
+    driver: overlay
+    attachable: true
+

enable_ipv6

Enable IPv6 networking on this network.

Not supported in Compose File version 3

enable_ipv6 requires you to use a version 2 Compose file, as this directive is not yet supported in Swarm mode.
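
As a sketch, in a version 2.x file this can look like the following; the network name and subnet are illustrative:

version: "2.4"
networks:
  app_net:
    enable_ipv6: true
    ipam:
      config:
        - subnet: "2001:db8::/64"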

ipam

Specify custom IPAM config. This is an object with several properties, each of which is optional:

A full example:

ipam:
+  driver: default
+  config:
+    - subnet: 172.28.0.0/16
+

Note

Additional IPAM configurations, such as gateway, are only honored for version 2 at the moment.

internal

By default, Docker also connects a bridge network to it to provide external connectivity. If you want to create an externally isolated overlay network, you can set this option to true.
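
A minimal sketch (the network name is illustrative):

networks:
  backend:
    driver: overlay
    internal: true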

labels

Add metadata to networks using Docker labels. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Financial transaction network"
+  com.example.department: "Finance"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Financial transaction network"
+  - "com.example.department=Finance"
+  - "com.example.label-with-empty-value"
+

external

If set to true, specifies that this network has been created outside of Compose. docker-compose up does not attempt to create it, and raises an error if it doesn’t exist.

For version 3.3 and below of the format, external cannot be used in conjunction with other network configuration keys (driver, driver_opts, ipam, internal). This limitation no longer exists for version 3.4 and above.

In the example below, proxy is the gateway to the outside world. Instead of attempting to create a network called [projectname]_outside, Compose looks for an existing network simply called outside and connects the proxy service’s containers to it.

version: "3.9"
+
+services:
+  proxy:
+    build: ./proxy
+    networks:
+      - outside
+      - default
+  app:
+    build: ./app
+    networks:
+      - default
+
+networks:
+  outside:
+    external: true
+

Deprecated in version 3.5 file format.

external.name was deprecated in the version 3.5 file format; use name instead.

You can also specify the name of the network separately from the name used to refer to it within the Compose file:

version: "3.9"
+networks:
+  outside:
+    external:
+      name: actual-name-of-network
+

name

Added in version 3.5 file format.

Set a custom name for this network. The name field can be used to reference networks which contain special characters. The name is used as is and will not be scoped with the stack name.

version: "3.9"
+networks:
+  network1:
+    name: my-app-net
+

It can also be used in conjunction with the external property:

version: "3.9"
+networks:
+  network1:
+    external: true
+    name: my-app-net
+

configs configuration reference

The top-level configs declaration defines or references configs that can be granted to the services in this stack. The source of the config is either file or external.

In this example, my_first_config is created (as <stack_name>_my_first_config) when the stack is deployed, and my_second_config already exists in Docker.

configs:
+  my_first_config:
+    file: ./config_data
+  my_second_config:
+    external: true
+

Another variant for external configs is when the name of the config in Docker is different from the name that exists within the service. The following example modifies the previous one to use the external config called redis_config.

configs:
+  my_first_config:
+    file: ./config_data
+  my_second_config:
+    external:
+      name: redis_config
+

You still need to grant access to the config to each service in the stack.

secrets configuration reference

The top-level secrets declaration defines or references secrets that can be granted to the services in this stack. The source of the secret is either file or external.

In this example, my_first_secret is created (as <stack_name>_my_first_secret) when the stack is deployed, and my_second_secret already exists in Docker.

secrets:
+  my_first_secret:
+    file: ./secret_data
+  my_second_secret:
+    external: true
+

Another variant for external secrets is when the name of the secret in Docker is different from the name that exists within the service. The following example modifies the previous one to use the external secret called redis_secret.

Compose File v3.5 and above

secrets:
+  my_first_secret:
+    file: ./secret_data
+  my_second_secret:
+    external: true
+    name: redis_secret
+

Compose File v3.4 and under

  my_second_secret:
+    external:
+      name: redis_secret
+

You still need to grant access to the secrets to each service in the stack.

Variable substitution

Your configuration options can contain environment variables. Compose uses the variable values from the shell environment in which docker-compose is run. For example, suppose the shell contains POSTGRES_VERSION=9.3 and you supply this configuration:

db:
+  image: "postgres:${POSTGRES_VERSION}"
+

When you run docker-compose up with this configuration, Compose looks for the POSTGRES_VERSION environment variable in the shell and substitutes its value in. For this example, Compose resolves the image to postgres:9.3 before running the configuration.

If an environment variable is not set, Compose substitutes with an empty string. In the example above, if POSTGRES_VERSION is not set, the value for the image option is postgres:.

You can set default values for environment variables using a .env file, which Compose automatically looks for in the project directory (the parent folder of your Compose file). Values set in the shell environment override those set in the .env file.
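
For instance, a .env file next to the Compose file could provide a default for the example above (a sketch):

# .env (placed in the project directory)
POSTGRES_VERSION=9.3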

Note when using docker stack deploy

The .env file feature only works when you use the docker-compose up command and does not work with docker stack deploy.

Both $VARIABLE and ${VARIABLE} syntax are supported. Additionally, when using the 2.1 file format, it is possible to provide inline default values using typical shell syntax:
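
For example (a sketch; the image and variable names are placeholders), ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment, while ${VARIABLE-default} evaluates to default only if VARIABLE is unset:

web:
  image: "webapp:${TAG:-latest}"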

Similarly, the following syntax allows you to specify mandatory variables:
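
For example (a sketch), ${VARIABLE:?err} exits with an error message containing err if VARIABLE is unset or empty, and ${VARIABLE?err} does so only if VARIABLE is unset:

db:
  image: "postgres:${POSTGRES_VERSION:?POSTGRES_VERSION must be set}"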

Other extended shell-style features, such as ${VARIABLE/foo/bar}, are not supported.

You can use a $$ (double-dollar sign) when your configuration needs a literal dollar sign. This also prevents Compose from interpolating a value, so a $$ allows you to refer to environment variables that you don’t want processed by Compose.

web:
+  build: .
+  command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE"
+

If you forget and use a single dollar sign ($), Compose interprets the value as an environment variable and warns you:

The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string.
+

Extension fields

Added in version 3.4 file format.

It is possible to re-use configuration fragments using extension fields. These special fields can be of any format as long as they are located at the root of your Compose file and their names start with the x- character sequence.

Note

Starting with the 3.7 format (for the 3.x series) and 2.4 format (for the 2.x series), extension fields are also allowed at the root of service, volume, network, config and secret definitions.

version: "3.9"
+x-custom:
+  items:
+    - a
+    - b
+  options:
+    max-size: '12m'
+  name: "custom"
+

The contents of those fields are ignored by Compose, but they can be inserted in your resource definitions using YAML anchors. For example, if you want several of your services to use the same logging configuration:

logging:
+  options:
+    max-size: '12m'
+    max-file: '5'
+  driver: json-file
+

You may write your Compose file as follows:

version: "3.9"
+x-logging:
+  &default-logging
+  options:
+    max-size: '12m'
+    max-file: '5'
+  driver: json-file
+
+services:
+  web:
+    image: myapp/web:latest
+    logging: *default-logging
+  db:
+    image: mysql:latest
+    logging: *default-logging
+

It is also possible to partially override values in extension fields using the YAML merge type. For example:

version: "3.9"
+x-volumes:
+  &default-volume
+  driver: foobar-storage
+
+services:
+  web:
+    image: myapp/web:latest
+    volumes: ["vol1", "vol2", "vol3"]
+volumes:
+  vol1: *default-volume
+  vol2:
+    << : *default-volume
+    name: volume02
+  vol3:
+    << : *default-volume
+    driver: default
+    name: volume-local
+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/compose/compose-file/compose-file-v3/ +

+
diff --git a/devdocs/docker/compose%2Fcompose-file%2Fcompose-versioning%2Findex.html b/devdocs/docker/compose%2Fcompose-file%2Fcompose-versioning%2Findex.html new file mode 100644 index 00000000..2a8d7ac1 --- /dev/null +++ b/devdocs/docker/compose%2Fcompose-file%2Fcompose-versioning%2Findex.html @@ -0,0 +1,151 @@ +

Compose file versions and upgrading

+ +

The Compose file is a YAML file defining services, networks, and volumes for a Docker application.

The Compose file formats are now described in these references, specific to each version.

Reference file What changed in this version
Compose Specification (most current, and recommended) Versioning
Version 3 Version 3 updates
Version 2 Version 2 updates
Version 1 (Deprecated) Version 1 updates

The topics below explain the differences among the versions, Docker Engine compatibility, and how to upgrade.

Compatibility matrix

There are several versions of the Compose file format – 1, 2, 2.x, and 3.x

This table shows which Compose file versions support specific Docker releases.

Compose file format Docker Engine release
Compose specification 19.03.0+
3.8 19.03.0+
3.7 18.06.0+
3.6 18.02.0+
3.5 17.12.0+
3.4 17.09.0+
3.3 17.06.0+
3.2 17.04.0+
3.1 1.13.1+
3.0 1.13.0+
2.4 17.12.0+
2.3 17.06.0+
2.2 1.13.0+
2.1 1.12.0+
2.0 1.10.0+

In addition to the Compose file format versions shown in the table, Compose itself is on a release schedule, as shown in Compose releases, but file format versions do not necessarily increment with each release. For example, Compose file format 3.0 was first introduced in Compose release 1.10.0, and versioned gradually in subsequent releases.

The latest Compose file format is defined by the Compose Specification and is implemented by Docker Compose 1.27.0+.

Looking for more detail on Docker and Compose compatibility?

We recommend keeping up-to-date with newer releases as much as possible. However, if you are using an older version of Docker and want to determine which Compose release is compatible, refer to the Compose release notes. Each set of release notes gives details on which versions of Docker Engine are supported, along with compatible Compose file format versions. (See also, the discussion in issue #3404.)

For details on versions and how to upgrade, see Versioning and Upgrading.

Versioning

There are three legacy versions of the Compose file format:

The latest and recommended version of the Compose file format is defined by the Compose Specification. This format merges the 2.x and 3.x versions and is implemented by Compose 1.27.0+.

v2 and v3 Declaration

Note: When specifying the Compose file version to use, make sure to specify both the major and minor numbers. If no minor version is given, 0 is used by default and not the latest minor version.

The Compatibility Matrix shows Compose file versions mapped to Docker Engine releases.

To move your project to a later version, see the Upgrading section.

Note: If you’re using multiple Compose files or extending services, each file must be of the same version - you cannot, for example, mix version 1 and 2 in a single project.

Several things differ depending on which version you use:

These differences are explained below.

Version 1 (Deprecated)

Compose files that do not declare a version are considered “version 1”. In those files, all the services are declared at the root of the document.

Version 1 is supported by Compose up to 1.6.x. It will be deprecated in a future Compose release.

Version 1 files cannot declare named volumes, networks or build arguments.

Compose does not take advantage of networking when you use version 1: every container is placed on the default bridge network and is reachable from every other container at its IP address. You need to use links to enable discovery between containers.

Example:

web:
+  build: .
+  ports:
+   - "8000:5000"
+  volumes:
+   - .:/code
+  links:
+   - redis
+redis:
+  image: redis
+

Version 2

Compose files using the version 2 syntax must indicate the version number at the root of the document. All services must be declared under the services key.

Version 2 files are supported by Compose 1.6.0+ and require a Docker Engine of version 1.10.0+.

Named volumes can be declared under the volumes key, and networks can be declared under the networks key.

By default, every container joins an application-wide default network, and is discoverable at a hostname that’s the same as the service name. This means links are largely unnecessary. For more details, see Networking in Compose.

Note

When specifying the Compose file version to use, make sure to specify both the major and minor numbers. If no minor version is given, 0 is used by default and not the latest minor version. As a result, features added in later versions will not be supported. For example:

version: "2"
+

is equivalent to:

version: "2.0"
+

Simple example:

version: "2.4"
+services:
+  web:
+    build: .
+    ports:
+     - "8000:5000"
+    volumes:
+     - .:/code
+  redis:
+    image: redis
+

A more extended example, defining volumes and networks:

version: "2.4"
+services:
+  web:
+    build: .
+    ports:
+     - "8000:5000"
+    volumes:
+     - .:/code
+    networks:
+      - front-tier
+      - back-tier
+  redis:
+    image: redis
+    volumes:
+      - redis-data:/var/lib/redis
+    networks:
+      - back-tier
+volumes:
+  redis-data:
+    driver: local
+networks:
+  front-tier:
+    driver: bridge
+  back-tier:
+    driver: bridge
+

Several other options were added to support networking, such as:

Variable substitution also was added in Version 2.

Version 2.1

An upgrade of version 2 that introduces new parameters only available with Docker Engine version 1.12.0+. Version 2.1 files are supported by Compose 1.9.0+.

Introduces the following additional parameters:

Version 2.2

An upgrade of version 2.1 that introduces new parameters only available with Docker Engine version 1.13.0+. Version 2.2 files are supported by Compose 1.13.0+. This version also allows you to specify default scale numbers inside the service’s configuration.

Introduces the following additional parameters:

Version 2.3

An upgrade of version 2.2 that introduces new parameters only available with Docker Engine version 17.06.0+. Version 2.3 files are supported by Compose 1.16.0+.

Introduces the following additional parameters:

Version 2.4

An upgrade of version 2.3 that introduces new parameters only available with Docker Engine version 17.12.0+. Version 2.4 files are supported by Compose 1.21.0+.

Introduces the following additional parameters:

Version 3

Designed to be cross-compatible between Compose and the Docker Engine’s swarm mode, version 3 removes several options and adds several more.

Note: When specifying the Compose file version to use, make sure to specify both the major and minor numbers. If no minor version is given, 0 is used by default and not the latest minor version. As a result, features added in later versions will not be supported. For example:

version: "3"
+

is equivalent to:

version: "3.0"
+

Version 3.1

An upgrade of version 3 that introduces new parameters only available with Docker Engine version 1.13.1+, and higher.

Introduces the following additional parameters:

Version 3.2

An upgrade of version 3 that introduces new parameters only available with Docker Engine version 17.04.0 and higher.

Introduces the following additional parameters:

Version 3.3

An upgrade of version 3 that introduces new parameters only available with Docker Engine version 17.06.0 and higher.

Introduces the following additional parameters:

Version 3.4

An upgrade of version 3 that introduces new parameters. It is only available with Docker Engine version 17.09.0 and higher.

Introduces the following additional parameters:

Version 3.5

An upgrade of version 3 that introduces new parameters. It is only available with Docker Engine version 17.12.0 and higher.

Introduces the following additional parameters:

Version 3.6

An upgrade of version 3 that introduces new parameters. It is only available with Docker Engine version 18.02.0 and higher.

Introduces the following additional parameters:

Version 3.7

An upgrade of version 3 that introduces new parameters. It is only available with Docker Engine version 18.06.0 and higher.

Introduces the following additional parameters:

Version 3.8

An upgrade of version 3 that introduces new parameters. It is only available with Docker Engine version 19.03.0 and higher.

Introduces the following additional parameters:

Upgrading

Version 2.x to 3.x

Between versions 2.x and 3.x, the structure of the Compose file is the same, but several options have been removed:

Version 1 to 2.x

In the majority of cases, moving from version 1 to 2 is a very simple process:

  1. Indent the whole file by one level and put a services: key at the top.
  2. Add a version: '2' line at the top of the file.
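
Applied to the version 1 example shown earlier, those two steps produce roughly the following file (a sketch, not a complete migration guide):

version: '2'
services:
  web:
    build: .
    ports:
     - "8000:5000"
    volumes:
     - .:/code
    links:
     - redis
  redis:
    image: redis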

It’s more complicated if you’re using particular configuration features:

Compatibility mode

docker-compose 1.20.0 introduces a new --compatibility flag designed to help developers transition to version 3 more easily. When enabled, docker-compose reads the deploy section of each service’s definition and attempts to translate it into the equivalent version 2 parameter. Currently, the following deploy keys are translated:

All other keys are ignored and produce a warning if present. You can review the configuration that will be used to deploy by using the --compatibility flag with the config command.

Do not use this in production!

We recommend against using --compatibility mode in production. Because the resulting configuration is only an approximation using non-Swarm mode properties, it may produce unexpected results.

Compose file format references




© 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
https://docs.docker.com/compose/compose-file/compose-versioning/

diff --git a/devdocs/docker/compose%2Fcompose-file%2Fdeploy%2Findex.html b/devdocs/docker/compose%2Fcompose-file%2Fdeploy%2Findex.html new file mode 100644 index 00000000..ac3b0330 --- /dev/null +++ b/devdocs/docker/compose%2Fcompose-file%2Fdeploy%2Findex.html @@ -0,0 +1,126 @@ +

Compose file deploy reference


The Compose specification is a platform-neutral way to define multi-container applications. A Compose implementation supporting deployment of the application model MAY require some additional metadata, as the Compose application model is too abstract to reflect actual per-service infrastructure needs or lifecycle constraints.

Compose Specification Deployment allows users to declare additional metadata on services so that Compose implementations have the relevant data to allocate adequate resources on the platform and configure them to match the user's needs.

Definitions

The Compose Specification is extended to support an OPTIONAL deploy subsection on services. This section defines runtime requirements for a service.

endpoint_mode

endpoint_mode specifies a service discovery method for external clients connecting to a service. Default and available values are platform specific; however, the Compose specification defines two canonical values: endpoint_mode: vip (the service is fronted by a platform-managed virtual IP) and endpoint_mode: dnsrr (DNS round-robin, where a DNS query for the service name returns the container IP addresses).

services:
  frontend:
    image: awesome/webapp
    ports:
      - "8080:80"
    deploy:
      mode: replicated
      replicas: 2
      endpoint_mode: vip

labels

labels specifies metadata for the service. These labels MUST only be set on the service and not on any containers for the service. This assumes the platform has some native concept of "service" that can match the Compose application model.

services:
  frontend:
    image: awesome/webapp
    deploy:
      labels:
        com.example.description: "This label will appear on the web service"

mode

mode defines the replication model used to run the service on the platform. Either global (exactly one container per physical node) or replicated (a specified number of containers). The default is replicated.

services:
  frontend:
    image: awesome/webapp
    deploy:
      mode: global

placement

placement specifies constraints and preferences for the platform to select a physical node on which to run service containers.

constraints

constraints defines a REQUIRED property the platform's node MUST fulfill to run the service's containers. It can be set either as a list or as a map with string values.

deploy:
  placement:
    constraints:
      - disktype=ssd

deploy:
  placement:
    constraints:
      disktype: ssd

preferences

preferences defines a property the platform's node SHOULD fulfill to run the service's containers. It can be set either as a list or as a map with string values.

deploy:
  placement:
    preferences:
      - datacenter=us-east

deploy:
  placement:
    preferences:
      datacenter: us-east

replicas

If the service is replicated (which is the default), replicas specifies the number of containers that SHOULD be running at any given time.

services:
  frontend:
    image: awesome/webapp
    deploy:
      mode: replicated
      replicas: 6

resources

resources configures physical resource constraints for containers to run on the platform. Those constraints can be configured as limits and/or reservations, as shown below.

services:
  frontend:
    image: awesome/webapp
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 50M
          pids: 1
        reservations:
          cpus: '0.25'
          memory: 20M

cpus

cpus configures a limit or reservation for how much of the available CPU resources (as number of cores) a container can use.

memory

memory configures a limit or reservation on the amount of memory a container can allocate, set as a string expressing a byte value.

pids

pids tunes a container’s PIDs limit, set as an integer.

devices

devices configures reservations of the devices a container can use. It contains a list of reservations, each set as an object with the following parameters: capabilities, driver, count, device_ids and options.

Devices are reserved using a list of capabilities, making capabilities the only required field. A device MUST satisfy all the requested capabilities for a successful reservation.

capabilities

capabilities are set as a list of strings, expressing both generic and driver specific capabilities. The generic capabilities recognized today are gpu (graphics accelerators) and tpu (AI accelerators).

To avoid name clashes, driver specific capabilities MUST be prefixed with the driver name. For example, reserving an nVidia CUDA-enabled accelerator might look like this:

deploy:
  resources:
    reservations:
      devices:
        - capabilities: ["nvidia-compute"]

driver

A different driver for the reserved device(s) can be requested using the driver field. The value is specified as a string.

deploy:
  resources:
    reservations:
      devices:
        - capabilities: ["nvidia-compute"]
          driver: nvidia

count

If count is set to all or not specified, Compose implementations MUST reserve all devices that satisfy the requested capabilities. Otherwise, Compose implementations MUST reserve at least the number of devices specified. The value is specified as an integer.

deploy:
  resources:
    reservations:
      devices:
        - capabilities: ["tpu"]
          count: 2

count and device_ids fields are exclusive. Compose implementations MUST return an error if both are specified.

device_ids

If device_ids is set, Compose implementations MUST reserve devices with the specified IDs providing they satisfy the requested capabilities. The value is specified as a list of strings.

deploy:
  resources:
    reservations:
      devices:
        - capabilities: ["gpu"]
          device_ids: ["GPU-f123d1c9-26bb-df9b-1c23-4a731f61d8c7"]

count and device_ids fields are exclusive. Compose implementations MUST return an error if both are specified.

options

Driver specific options can be set with options as key-value pairs.

deploy:
  resources:
    reservations:
      devices:
        - capabilities: ["gpu"]
          driver: gpuvendor
          options:
            virtualization: false

restart_policy

restart_policy configures if and how to restart containers when they exit. If restart_policy is not set, Compose implementations MUST consider the restart field set by the service configuration.

deploy:
  restart_policy:
    condition: on-failure
    delay: 5s
    max_attempts: 3
    window: 120s

rollback_config

rollback_config configures how the service should be rolled back in case of a failing update.
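
As a rough sketch, assuming rollback_config accepts the same kind of fields as update_config shown below (parallelism, delay, failure_action; those field names are not listed on this page and should be checked against the specification), a rollback policy could look like:

deploy:
  rollback_config:
    parallelism: 1
    delay: 5s
    failure_action: pause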

update_config

update_config configures how the service should be updated. Useful for configuring rolling updates.

deploy:
  update_config:
    parallelism: 2
    delay: 10s
    order: stop-first



© 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
https://docs.docker.com/compose/compose-file/deploy/

diff --git a/devdocs/docker/compose%2Fcompose-file%2Findex.html b/devdocs/docker/compose%2Fcompose-file%2Findex.html new file mode 100644 index 00000000..31fed1f0 --- /dev/null +++ b/devdocs/docker/compose%2Fcompose-file%2Findex.html @@ -0,0 +1,812 @@ +

Compose specification


The Compose file is a YAML file defining services, networks, and volumes for a Docker application. The latest and recommended version of the Compose file format is defined by the Compose Specification. The Compose spec merges the legacy 2.x and 3.x versions, aggregating properties across these formats and is implemented by Compose 1.27.0+.

Status of this document

This document specifies the Compose file format used to define multi-container applications. Distribution of this document is unlimited.

The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL NOT”, “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in RFC 2119.

Requirements and optional attributes

The Compose specification includes properties designed to target a local OCI container runtime, exposing Linux kernel specific configuration options, but also some Windows container specific properties, as well as cloud platform features related to resource placement on a cluster, replicated application distribution and scalability.

We acknowledge that no Compose implementation is expected to support all attributes, and that support for some properties is platform dependent and can only be confirmed at runtime. The definition of a versioned schema to control the supported properties in a Compose file, established by the docker-compose tool where the Compose file format was designed, doesn't offer any guarantee to the end user that attributes will actually be implemented.

The specification defines the expected configuration syntax and behavior, but, unless noted otherwise, supporting any of those is OPTIONAL.

A Compose implementation parsing a Compose file that uses unsupported attributes SHOULD warn the user. We recommend that implementors support both a default mode, which warns about unsupported attributes but otherwise ignores them, and a strict mode, which rejects Compose files containing unsupported attributes.

The Compose application model

The Compose specification allows one to define a platform-agnostic container-based application. Such an application is designed as a set of containers which have to both run together with adequate shared resources and communication channels.

Computing components of an application are defined as Services. A Service is an abstract concept implemented on platforms by running the same container image (and configuration) one or more times.

Services communicate with each other through Networks. In this specification, a Network is a platform capability abstraction to establish an IP route between containers within services connected together. Low-level, platform-specific networking options are grouped into the Network definition and MAY be partially implemented on some platforms.

Services store and share persistent data in Volumes. The specification describes such persistent data as a high-level filesystem mount with global options. Actual platform-specific implementation details are grouped into the Volumes definition and MAY be partially implemented on some platforms.

Some services require configuration data that is dependent on the runtime or platform. For this, the specification defines a dedicated concept: Configs. From a Service container point of view, Configs are comparable to Volumes, in that they are files mounted into the container. But the actual definition involves distinct platform resources and services, which are abstracted by this type.

A Secret is a specific flavor of configuration data for sensitive data that SHOULD NOT be exposed without security considerations. Secrets are made available to services as files mounted into their containers, but the platform-specific resources to provide sensitive data are specific enough to deserve a distinct concept and definition within the Compose specification.

The distinction between Volumes, Configs and Secrets allows implementations to offer a comparable abstraction at the service level, while covering the specific configuration of adequate platform resources for well-identified data usages.

A Project is an individual deployment of an application specification on a platform. A project's name is used to group resources together and isolate them from other applications or other installations of the same Compose-specified application with distinct parameters. A Compose implementation creating resources on a platform MUST prefix resource names with the project name and set the label com.docker.compose.project.

The project name can be set explicitly by the top-level name attribute. A Compose implementation MUST offer a way for the user to set a custom project name and override this name, so that the same compose.yaml file can be deployed twice on the same infrastructure, without changes, by just passing a distinct name.

Illustrative example

The following example illustrates Compose specification concepts with a concrete example application. The example is non-normative.

Consider an application split into a frontend web application and a backend service.

The frontend is configured at runtime with an HTTP configuration file managed by infrastructure, providing an external domain name, and an HTTPS server certificate injected by the platform’s secured secret store.

The backend stores data in a persistent volume.

Both services communicate with each other on an isolated back-tier network, while frontend is also connected to a front-tier network and exposes port 443 for external usage.

(External user) --> 443 [frontend network]
                            |
                  +--------------------+
                  |  frontend service  |...ro...<HTTP configuration>
                  |      "webapp"      |...ro...<server certificate> #secured
                  +--------------------+
                            |
                        [backend network]
                            |
                  +--------------------+
                  |  backend service   |  r+w   ___________________
                  |     "database"     |=======( persistent volume )
                  +--------------------+        \_________________/

The example application is composed of the following parts: 2 services backed by Docker images (webapp and database), 1 secret (the HTTPS certificate) injected into the frontend, 1 config (the HTTP configuration) injected into the frontend, 1 persistent volume attached to the backend, and 2 networks.

services:
  frontend:
    image: awesome/webapp
    ports:
      - "443:8043"
    networks:
      - front-tier
      - back-tier
    configs:
      - httpd-config
    secrets:
      - server-certificate

  backend:
    image: awesome/database
    volumes:
      - db-data:/etc/data
    networks:
      - back-tier

volumes:
  db-data:
    driver: flocker
    driver_opts:
      size: "10GiB"

configs:
  httpd-config:
    external: true

secrets:
  server-certificate:
    external: true

networks:
  # The presence of these objects is sufficient to define them
  front-tier: {}
  back-tier: {}

This example illustrates the distinction between volumes, configs and secrets. While all of them are exposed to service containers as mounted files or directories, only a volume can be configured for read+write access. Secrets and configs are read-only. The volume configuration allows you to select a volume driver and pass driver options to tweak volume management according to the actual infrastructure. Configs and secrets rely on platform services, and are declared external as they are not managed as part of the application lifecycle: the Compose implementation will use a platform-specific lookup mechanism to retrieve runtime values.

Compose file

The Compose file is a YAML file defining version (DEPRECATED), services (REQUIRED), networks, volumes, configs and secrets. The default path for a Compose file is compose.yaml (preferred) or compose.yml in the working directory. Compose implementations SHOULD also support docker-compose.yaml and docker-compose.yml for backward compatibility. If both files exist, Compose implementations MUST prefer the canonical compose.yaml.

Multiple Compose files can be combined together to define the application model. The combination of YAML files MUST be implemented by appending/overriding YAML elements based on the Compose file order set by the user. Simple attributes and maps get overridden by the highest order Compose file, lists get merged by appending. Relative paths MUST be resolved based on the first Compose file's parent folder, whenever complementary files being merged are hosted in other folders.

As some Compose file elements can both be expressed as single strings or complex objects, merges MUST apply to the expanded form.

Profiles

Profiles allow adjusting the Compose application model for various usages and environments. A Compose implementation SHOULD allow the user to define a set of active profiles. The exact mechanism is implementation specific and MAY include command line flags, environment variables, etc.

The Services top-level element supports a profiles attribute to define a list of named profiles. Services without a profiles attribute set MUST always be enabled. A service MUST be ignored by the Compose implementation when none of the listed profiles match the active ones, unless the service is explicitly targeted by a command. In that case its profiles MUST be added to the set of active profiles. All other top-level elements are not affected by profiles and are always active.

References to other services (by links, extends or shared resource syntax service:xxx) MUST not automatically enable a component that would otherwise have been ignored by active profiles. Instead the Compose implementation MUST return an error.

Illustrative example

services:
  foo:
    image: foo
  bar:
    image: bar
    profiles:
      - test
  baz:
    image: baz
    depends_on:
      - bar
    profiles:
      - test
  zot:
    image: zot
    depends_on:
      - bar
    profiles:
      - debug

Version top-level element

Top-level version property is defined by the specification for backward compatibility but is only informative.

A Compose implementation SHOULD NOT use this version to select an exact schema to validate the Compose file, but prefer the most recent schema at the time it has been designed.

Compose implementations SHOULD validate whether they can fully parse the Compose file. If some fields are unknown, typically because the Compose file was written with fields defined by a newer version of the specification, Compose implementations SHOULD warn the user. Compose implementations MAY offer options to ignore unknown fields (as defined by “loose” mode).

Name top-level element

The top-level name property is defined by the specification as the project name to be used if the user doesn't set one explicitly. Compose implementations MUST offer a way for the user to override this name, and SHOULD define a mechanism to compute a default project name, to be used if the top-level name element is not set.

Whenever the project name is defined by the top-level name or by some custom mechanism, it MUST be exposed for interpolation and environment variable resolution as COMPOSE_PROJECT_NAME.

services:
  foo:
    image: busybox
    environment:
      - COMPOSE_PROJECT_NAME
    command: echo "I'm running ${COMPOSE_PROJECT_NAME}"

Services top-level element

A Service is an abstract definition of a computing resource within an application which can be scaled/replaced independently from other components. Services are backed by a set of containers, run by the platform according to replication requirements and placement constraints. Being backed by containers, Services are defined by a Docker image and set of runtime arguments. All containers within a service are identically created with these arguments.

A Compose file MUST declare a services root element as a map whose keys are string representations of service names, and whose values are service definitions. A service definition contains the configuration that is applied to each container started for that service.

Each service MAY also include a Build section, which defines how to create the Docker image for the service. Compose implementations MAY support building docker images using this service definition. If not implemented the Build section SHOULD be ignored and the Compose file MUST still be considered valid.

Build support is an OPTIONAL aspect of the Compose specification, and is described in detail in the Build support documentation.

Each Service defines runtime constraints and requirements to run its containers. The deploy section groups these constraints and allows the platform to adjust the deployment strategy to best match containers’ needs with available resources.

Deploy support is an OPTIONAL aspect of the Compose specification, and is described in detail in the Deployment support documentation. If not implemented, the Deploy section SHOULD be ignored and the Compose file MUST still be considered valid.

build

build specifies the build configuration for creating container image from source, as defined in the Build support documentation.

blkio_config

blkio_config defines a set of configuration options to set block IO limits for this service.

services:
  foo:
    image: busybox
    blkio_config:
       weight: 300
       weight_device:
         - path: /dev/sda
           weight: 400
       device_read_bps:
         - path: /dev/sdb
           rate: '12mb'
       device_read_iops:
         - path: /dev/sdb
           rate: 120
       device_write_bps:
         - path: /dev/sdb
           rate: '1024k'
       device_write_iops:
         - path: /dev/sdb
           rate: 30

device_read_bps, device_write_bps

Set a limit in bytes per second for read / write operations on a given device. Each item in the list MUST have two keys: path, defining the symbolic path to the affected device, and rate, expressed either as an integer number of bytes or as a string expressing a byte value.

device_read_iops, device_write_iops

Set a limit in operations per second for read / write operations on a given device. Each item in the list MUST have two keys: path, defining the symbolic path to the affected device, and rate, expressed as an integer number of permitted operations per second.

weight

Modify the proportion of bandwidth allocated to this service relative to other services. Takes an integer value between 10 and 1000, with 500 being the default.

weight_device

Fine-tune bandwidth allocation by device. Each item in the list must have two keys: path, defining the symbolic path to the affected device, and weight, an integer value between 10 and 1000.

cpu_count

cpu_count defines the number of usable CPUs for service container.

cpu_percent

cpu_percent defines the usable percentage of the available CPUs.

cpu_shares

cpu_shares defines (as an integer value) the service container's relative CPU weight versus other containers.
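
As an illustrative sketch, these attributes sit directly under a service definition (the service name and values below are arbitrary):

services:
  worker:
    image: busybox
    cpu_count: 2
    cpu_percent: 50
    cpu_shares: 512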

cpu_period

cpu_period allows Compose implementations to configure the CPU CFS (Completely Fair Scheduler) period when the platform is based on the Linux kernel.

cpu_quota

cpu_quota allows Compose implementations to configure the CPU CFS (Completely Fair Scheduler) quota when the platform is based on the Linux kernel.

cpu_rt_runtime

cpu_rt_runtime configures CPU allocation parameters for platforms with realtime scheduler support. It can be either an integer value using microseconds as the unit or a duration.

cpu_rt_runtime: '400ms'
cpu_rt_runtime: 95000

cpu_rt_period

cpu_rt_period configures CPU allocation parameters for platforms with realtime scheduler support. It can be either an integer value using microseconds as the unit or a duration.

cpu_rt_period: '1400us'
cpu_rt_period: 11000

cpus

DEPRECATED: use deploy.reservations.cpus

cpus defines the number of (potentially virtual) CPUs to allocate to service containers. This is a fractional number. 0.000 means no limit.

cpuset

cpuset defines the explicit CPUs in which to allow execution. Can be a range 0-3 or a list 0,1
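
For instance, the two forms described above would be written as (a minimal sketch):

cpuset: 0-3
cpuset: 0,1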

cap_add

cap_add specifies additional container capabilities as strings.

cap_add:
  - ALL

cap_drop

cap_drop specifies container capabilities to drop as strings.

cap_drop:
  - NET_ADMIN
  - SYS_ADMIN

cgroup_parent

cgroup_parent specifies an OPTIONAL parent cgroup for the container.

cgroup_parent: m-executor-abcd

command

command overrides the default command declared by the container image (i.e. by Dockerfile's CMD).

command: bundle exec thin -p 3000

The command can also be a list, in a manner similar to Dockerfile:

command: [ "bundle", "exec", "thin", "-p", "3000" ]
+

configs

configs grant access to configs on a per-service basis using the per-service configs configuration. Two different syntax variants are supported.

Compose implementations MUST report an error if config doesn’t exist on platform or isn’t defined in the configs section of this Compose file.

There are two syntaxes defined for configs. To remain compliant with this specification, an implementation MUST support both syntaxes. Implementations MUST allow use of both short and long syntaxes within the same document.

Short syntax

The short syntax variant only specifies the config name. This grants the container access to the config and mounts it at /<config_name> within the container. The source name and destination mount point are both set to the config name.

The following example uses the short syntax to grant the redis service access to the my_config and my_other_config configs. The value of my_config is set to the contents of the file ./my_config.txt, and my_other_config is defined as an external resource, which means that it has already been defined in the platform. If the external config does not exist, the deployment MUST fail.

services:
  redis:
    image: redis:latest
    configs:
      - my_config
configs:
  my_config:
    file: ./my_config.txt
  my_other_config:
    external: true

Long syntax

The long syntax provides more granularity in how the config is created within the service’s task containers.

The following example sets the name of my_config to redis_config within the container, sets the mode to 0440 (group-readable) and sets the user and group to 103. The redis service does not have access to the my_other_config config.

services:
  redis:
    image: redis:latest
    configs:
      - source: my_config
        target: /redis_config
        uid: "103"
        gid: "103"
        mode: 0440
configs:
  my_config:
    external: true
  my_other_config:
    external: true

You can grant a service access to multiple configs, and you can mix long and short syntax.

container_name

container_name is a string that specifies a custom container name, rather than a generated default name.

container_name: my-web-container

Compose implementation MUST NOT scale a service beyond one container if the Compose file specifies a container_name. Attempting to do so MUST result in an error.

If present, container_name SHOULD follow the regex format of [a-zA-Z0-9][a-zA-Z0-9_.-]+

credential_spec

credential_spec configures the credential spec for a managed service account.

Compose implementations that support services using Windows containers MUST support file: and registry: protocols for credential_spec. Compose implementations MAY also support additional protocols for custom use-cases.

The credential_spec must be in the format file://<filename> or registry://<value-name>.

credential_spec:
  file: my-credential-spec.json

When using registry:, the credential spec is read from the Windows registry on the daemon’s host. A registry value with the given name must be located in:

HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs

The following example loads the credential spec from a value named my-credential-spec in the registry:

credential_spec:
  registry: my-credential-spec

Example gMSA configuration

When configuring a gMSA credential spec for a service, you only need to specify a credential spec with config, as shown in the following example:

services:
  myservice:
    image: myimage:latest
    credential_spec:
      config: my_credential_spec

configs:
  my_credential_spec:
    file: ./my-credential-spec.json

depends_on

depends_on expresses startup and shutdown dependencies between services.

Short syntax

The short syntax variant only specifies service names of the dependencies. Service dependencies cause the following behaviors:

Simple example:

services:
  web:
    build: .
    depends_on:
      - db
      - redis
  redis:
    image: redis
  db:
    image: postgres

Compose implementations MUST guarantee dependency services have been started before starting a dependent service. Compose implementations MAY wait for dependency services to be “ready” before starting a dependent service.

Long syntax

The long form syntax enables the configuration of additional fields that can’t be expressed in the short form.

Service dependencies cause the following behaviors:

Simple example:

services:
  web:
    build: .
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
  redis:
    image: redis
  db:
    image: postgres

Compose implementations MUST guarantee dependency services have been started before starting a dependent service. Compose implementations MUST guarantee dependency services marked with service_healthy are “healthy” before starting a dependent service.

deploy

deploy specifies the configuration for the deployment and lifecycle of services, as defined here.

device_cgroup_rules

device_cgroup_rules defines a list of device cgroup rules for this container. The format is the same format the Linux kernel specifies in the Control Groups Device Whitelist Controller.

device_cgroup_rules:
  - 'c 1:3 mr'
  - 'a 7:* rmw'

devices

devices defines a list of device mappings for created containers in the form of HOST_PATH:CONTAINER_PATH[:CGROUP_PERMISSIONS].

devices:
  - "/dev/ttyUSB0:/dev/ttyUSB0"
  - "/dev/sda:/dev/xvda:rwm"

dns

dns defines custom DNS servers to set on the container network interface configuration. Can be a single value or a list.

dns: 8.8.8.8

dns:
  - 8.8.8.8
  - 9.9.9.9

dns_opt

dns_opt lists custom DNS options to be passed to the container's DNS resolver (/etc/resolv.conf file on Linux).

dns_opt:
  - use-vc
  - no-tld-query

dns_search

dns_search defines custom DNS search domains to set on the container network interface configuration. Can be a single value or a list.

dns_search: example.com

dns_search:
  - dc1.example.com
  - dc2.example.com

domainname

domainname declares a custom domain name to use for the service container. MUST be a valid RFC 1123 hostname.
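
For example (the value is arbitrary):

domainname: example.com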

entrypoint

entrypoint overrides the default entrypoint for the Docker image (i.e. ENTRYPOINT set by Dockerfile). Compose implementations MUST clear out any default command on the Docker image - both ENTRYPOINT and CMD instruction in the Dockerfile - when entrypoint is configured by a Compose file. If command is also set, it is used as parameter to entrypoint as a replacement for Docker image’s CMD

entrypoint: /code/entrypoint.sh

The entrypoint can also be a list, in a manner similar to Dockerfile:

entrypoint:
  - php
  - -d
  - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so
  - -d
  - memory_limit=-1
  - vendor/bin/phpunit

env_file

env_file adds environment variables to the container based on file content.

env_file: .env

env_file can also be a list. The files in the list MUST be processed from the top down. For the same variable specified in two env files, the value from the last file in the list MUST stand.

env_file:
  - ./a.env
  - ./b.env

Relative paths MUST be resolved from the Compose file's parent folder. As absolute paths prevent the Compose file from being portable, Compose implementations SHOULD warn users when such a path is used to set env_file.

Environment variables declared in the environment section MUST override these values – this holds true even if those values are empty or undefined.

Env_file format

Each line in an env file MUST be in VAR[=[VAL]] format. Lines beginning with # MUST be ignored. Blank lines MUST also be ignored.

The value of VAL is used as a raw string and not modified at all. If the value is surrounded by quotes (as is often the case for shell variables), the quotes MUST be included in the value passed to containers created by the Compose implementation.

VAL MAY be omitted, in which case the variable value is an empty string. =VAL MAY be omitted, in which case the variable is unset.

# Set Rails/Rack environment
RACK_ENV=development
VAR="quoted"

environment

environment defines environment variables set in the container. environment can use either an array or a map. Any boolean values (true, false, yes, no) SHOULD be enclosed in quotes to ensure they are not converted to True or False by the YAML parser.

Environment variables MAY be declared by a single key (no value to equals sign). In such a case Compose implementations SHOULD rely on some user interaction to resolve the value. If they do not, the variable is unset and will be removed from the service container environment.

Map syntax:

environment:
  RACK_ENV: development
  SHOW: "true"
  USER_INPUT:

Array syntax:

environment:
  - RACK_ENV=development
  - SHOW=true
  - USER_INPUT

When both env_file and environment are set for a service, values set by environment have precedence.

expose

expose defines the ports that Compose implementations MUST expose from container. These ports MUST be accessible to linked services and SHOULD NOT be published to the host machine. Only the internal container ports can be specified.

expose:
  - "3000"
  - "8000"
+

extends

Extend another service, in the current file or another, optionally overriding configuration. You can use extends on any service together with other configuration keys. The extends value MUST be a mapping defined with a required service and an optional file key.

extends:
  file: common.yml
  service: webapp

If supported, Compose implementations MUST process extends in the following way:

Restrictions

The following restrictions apply to the service being referenced:

Compose implementations MUST return an error in all of these cases.

Finding referenced service

file value can be:

Service denoted by service MUST be present in the identified referenced Compose file. Compose implementations MUST return an error if:

Merging service definitions

Two service definitions (main one in the current Compose file and referenced one specified by extends) MUST be merged in the following way:

Mappings

The following keys should be treated as mappings: build.args, build.labels, build.extra_hosts, deploy.labels, deploy.update_config, deploy.rollback_config, deploy.restart_policy, deploy.resources.limits, environment, healthcheck, labels, logging.options, sysctls, storage_opt, extra_hosts, ulimits.

One exception that applies to healthcheck is that main mapping cannot specify disable: true unless referenced mapping also specifies disable: true. Compose implementations MUST return an error in this case.

For example, the input below:

services:
  common:
    image: busybox
    environment:
      TZ: utc
      PORT: 80
  cli:
    extends:
      service: common
    environment:
      PORT: 8080

Produces the following configuration for the cli service. The same output is produced if array syntax is used.

environment:
  PORT: 8080
  TZ: utc
image: busybox

Items under blkio_config.device_read_bps, blkio_config.device_read_iops, blkio_config.device_write_bps, blkio_config.device_write_iops, devices and volumes are also treated as mappings where key is the target path inside the container.

For example, the input below:

services:
  common:
    image: busybox
    volumes:
      - common-volume:/var/lib/backup/data:rw
  cli:
    extends:
      service: common
    volumes:
      - cli-volume:/var/lib/backup/data:ro

Produces the following configuration for the cli service. Note that mounted path now points to the new volume name and ro flag was applied.

image: busybox
volumes:
- cli-volume:/var/lib/backup/data:ro

If referenced service definition contains extends mapping, the items under it are simply copied into the new merged definition. Merging process is then kicked off again until no extends keys are remaining.

For example, the input below:

services:
  base:
    image: busybox
    user: root
  common:
    image: busybox
    extends:
      service: base
  cli:
    extends:
      service: common

Produces the following configuration for the cli service. Here, the cli service gets the user key from the common service, which in turn gets this key from the base service.

image: busybox
user: root

Sequences

The following keys should be treated as sequences: cap_add, cap_drop, configs, deploy.placement.constraints, deploy.placement.preferences, deploy.reservations.generic_resources, device_cgroup_rules, expose, external_links, ports, secrets, security_opt. Any duplicates resulting from the merge are removed so that the sequence only contains unique elements.

For example, the input below:

services:
  common:
    image: busybox
    security_opt:
      - label:role:ROLE
  cli:
    extends:
      service: common
    security_opt:
      - label:user:USER

Produces the following configuration for the cli service.

image: busybox
security_opt:
- label:role:ROLE
- label:user:USER

In case list syntax is used, the following keys should also be treated as sequences: dns, dns_search, env_file, tmpfs. Unlike sequence fields mentioned above, duplicates resulting from the merge are not removed.

Scalars

Any other allowed keys in the service definition should be treated as scalars.

external_links

external_links link service containers to services managed outside this Compose application. external_links defines the name of an existing service to retrieve using the platform lookup mechanism. An alias of the form SERVICE:ALIAS can be specified.

external_links:
  - redis
  - database:mysql
  - database:postgresql

extra_hosts

extra_hosts adds hostname mappings to the container network interface configuration (/etc/hosts for Linux). Values MUST set hostname and IP address for additional hosts in the form of HOSTNAME:IP.

extra_hosts:
  - "somehost:162.242.195.82"
  - "otherhost:50.31.209.229"

Compose implementations MUST create a matching entry with the IP address and hostname in the container's network configuration, which means that on Linux /etc/hosts will get extra lines:

162.242.195.82  somehost
50.31.209.229   otherhost

group_add

group_add specifies additional groups (by name or number) which the user inside the container MUST be a member of.

An example of where this is useful is when multiple containers (running as different users) need to all read or write the same file on a shared volume. That file can be owned by a group shared by all the containers, and specified in group_add.

services:
  myservice:
    image: alpine
    group_add:
      - mail

Running id inside the created container MUST show that the user belongs to the mail group, which would not have been the case if group_add were not declared.

healthcheck

healthcheck declares a check that’s run to determine whether or not containers for this service are “healthy”. This overrides HEALTHCHECK Dockerfile instruction set by the service’s Docker image.

healthcheck:
  test: ["CMD", "curl", "-f", "http://localhost"]
  interval: 1m30s
  timeout: 10s
  retries: 3
  start_period: 40s

interval, timeout and start_period are specified as durations.

test defines the command the Compose implementation will run to check container health. It can be either a string or a list. If it’s a list, the first item must be either NONE, CMD or CMD-SHELL. If it’s a string, it’s equivalent to specifying CMD-SHELL followed by that string.

# Hit the local web app
test: ["CMD", "curl", "-f", "http://localhost"]

Using CMD-SHELL will run the command configured as a string using the container’s default shell (/bin/sh for Linux). Both forms below are equivalent:

test: ["CMD-SHELL", "curl -f http://localhost || exit 1"]
+
test: curl -f https://localhost || exit 1
+

NONE disables the healthcheck, and is mostly useful to disable a healthcheck set by the image. Alternatively, the healthcheck set by the image can be disabled by setting disable: true:

healthcheck:
  disable: true

hostname

hostname declares a custom host name to use for the service container. MUST be a valid RFC 1123 hostname.

image

image specifies the image to start the container from. Image MUST follow the Open Container Specification addressable image format, as [<registry>/][<project>/]<image>[:<tag>|@<digest>].

image: redis
image: redis:5
image: redis@sha256:0ed5d5928d4737458944eb604cc8509e245c3e19d02ad83935398bc4b991aac7
image: library/redis
image: docker.io/library/redis
image: my_private.registry:5000/redis

If the image does not exist on the platform, Compose implementations MUST attempt to pull it based on the pull_policy. Compose implementations with build support MAY offer alternative options for the end user to control precedence of pull over building the image from source, however pulling the image MUST be the default behavior.

image MAY be omitted from a Compose file as long as a build section is declared. Compose implementations without build support MUST fail when image is missing from the Compose file.

init

init runs an init process (PID 1) inside the container that forwards signals and reaps processes. Set this option to true to enable this feature for the service.

services:
  web:
    image: alpine:latest
    init: true

The init binary that is used is platform specific.

ipc

ipc configures the IPC isolation mode for the service container. Available values are platform specific, but the Compose specification defines specific values which MUST be implemented as described if supported:

    ipc: "shareable"
+    ipc: "service:[service name]"
+

isolation

isolation specifies a container’s isolation technology. Supported values are platform-specific.

labels

labels add metadata to containers. You can use either an array or a map.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
  com.example.description: "Accounting webapp"
  com.example.department: "Finance"
  com.example.label-with-empty-value: ""

labels:
  - "com.example.description=Accounting webapp"
  - "com.example.department=Finance"
  - "com.example.label-with-empty-value"

Compose implementations MUST create containers with canonical labels:

The com.docker.compose label prefix is reserved. Specifying labels with this prefix in the Compose file MUST result in a runtime error.

links

links defines a network link to containers in another service. Either specify both the service name and a link alias (SERVICE:ALIAS), or just the service name.

web:
  links:
    - db
    - db:database
    - redis

Containers for the linked service MUST be reachable at a hostname identical to the alias, or the service name if no alias was specified.

Links are not required to enable services to communicate - when no specific network configuration is set, any service MUST be able to reach any other service at that service’s name on the default network. If services do declare networks they are attached to, links SHOULD NOT override the network configuration and services not attached to a shared network SHOULD NOT be able to communicate. Compose implementations MAY NOT warn the user about this configuration mismatch.

Links also express implicit dependency between services in the same way as depends_on, so they determine the order of service startup.

logging

logging defines the logging configuration for the service.

logging:
  driver: syslog
  options:
    syslog-address: "tcp://192.168.0.42:123"

The driver name specifies a logging driver for the service’s containers. The default and available values are platform specific. Driver specific options can be set with options as key-value pairs.

network_mode

network_mode sets the service container's network mode. Available values are platform specific, but the Compose specification defines specific values which MUST be implemented as described if supported:

    network_mode: "host"
+    network_mode: "none"
+    network_mode: "service:[service name]"
+

networks

networks defines the networks that service containers are attached to, referencing entries under the top-level networks key.

services:
  some-service:
    networks:
      - some-network
      - other-network

aliases

aliases declares alternative hostnames for this service on the network. Other containers on the same network can use either the service name or this alias to connect to one of the service’s containers.

Since aliases are network-scoped, the same service can have different aliases on different networks.

Note: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name resolves to is not guaranteed.

The general format is shown here:

services:
  some-service:
    networks:
      some-network:
        aliases:
          - alias1
          - alias3
      other-network:
        aliases:
          - alias2

In the example below, service frontend will be able to reach the backend service at the hostname backend or database on the back-tier network, and service monitoring will be able to reach same backend service at db or mysql on the admin network.

services:
  frontend:
    image: awesome/webapp
    networks:
      - front-tier
      - back-tier

  monitoring:
    image: awesome/monitoring
    networks:
      - admin

  backend:
    image: awesome/backend
    networks:
      back-tier:
        aliases:
          - database
      admin:
        aliases:
          - mysql

networks:
  front-tier:
  back-tier:
  admin:

ipv4_address, ipv6_address

Specify a static IP address for containers for this service when joining the network.

The corresponding network configuration in the top-level networks section MUST have an ipam block with subnet configurations covering each static address.

services:
  frontend:
    image: awesome/webapp
    networks:
      front-tier:
        ipv4_address: 172.16.238.10
        ipv6_address: 2001:3984:3989::10

networks:
  front-tier:
    ipam:
      driver: default
      config:
        - subnet: "172.16.238.0/24"
        - subnet: "2001:3984:3989::/64"

link_local_ips

link_local_ips specifies a list of link-local IPs. Link-local IPs are special IPs which belong to a well known subnet and are purely managed by the operator, usually dependent on the architecture where they are deployed. Implementation is Platform specific.

Example:

services:
  app:
    image: busybox
    command: top
    networks:
      app_net:
        link_local_ips:
          - 57.123.22.11
          - 57.123.22.13
networks:
  app_net:
    driver: bridge

priority

priority indicates in which order Compose implementation SHOULD connect the service’s containers to its networks. If unspecified, the default value is 0.

In the following example, the app service connects to app_net_1 first as it has the highest priority. It then connects to app_net_3, then app_net_2, which uses the default priority value of 0.

services:
  app:
    image: busybox
    command: top
    networks:
      app_net_1:
        priority: 1000
      app_net_2:

      app_net_3:
        priority: 100
networks:
  app_net_1:
  app_net_2:
  app_net_3:

mac_address

mac_address sets a MAC address for service container.

mem_limit

DEPRECATED: use deploy.limits.memory

mem_reservation

DEPRECATED: use deploy.reservations.memory

mem_swappiness

mem_swappiness defines, as a percentage (a value between 0 and 100), the host kernel's tendency to swap out anonymous memory pages used by a container.

Default value is platform specific.
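
For instance, a minimal sketch with an arbitrary value:

mem_swappiness: 60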

memswap_limit

memswap_limit defines the amount of memory container is allowed to swap to disk. This is a modifier attribute that only has meaning if memory is also set. Using swap allows the container to write excess memory requirements to disk when the container has exhausted all the memory that is available to it. There is a performance penalty for applications that swap memory to disk often.

oom_kill_disable

If oom_kill_disable is set, the Compose implementation MUST configure the platform so it won't kill the container in case of memory starvation.

oom_score_adj

oom_score_adj tunes the preference for containers to be killed by the platform in case of memory starvation. Value MUST be within the [-1000,1000] range.
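
For example, an illustrative (arbitrary) value:

oom_score_adj: 500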

pid

pid sets the PID mode for container created by the Compose implementation. Supported values are platform specific.

pids_limit

DEPRECATED: use deploy.reservations.pids

pids_limit tunes a container’s PIDs limit. Set to -1 for unlimited PIDs.

pids_limit: 10

platform

platform defines the target platform containers for this service will run on, using the os[/arch[/variant]] syntax. Compose implementation MUST use this attribute when declared to determine which version of the image will be pulled and/or on which platform the service’s build will be performed.

platform: osx
platform: windows/amd64
platform: linux/arm64/v8

ports

Exposes container ports. Port mapping MUST NOT be used with network_mode: host and doing so MUST result in a runtime error.

Short syntax

The short syntax is a colon-separated string to set host IP, host port and container port in the form:

[HOST:]CONTAINER[/PROTOCOL] where:

Host IP, if not set, MUST bind to all network interfaces. Port can be either a single value or a range. Host and container MUST use equivalent ranges.

Either specify both ports (HOST:CONTAINER), or just the container port. In the latter case, the Compose implementation SHOULD automatically allocate any unassigned host port.

HOST:CONTAINER SHOULD always be specified as a (quoted) string, to avoid conflicts with yaml base-60 float.

Samples:

ports:
  - "3000"
  - "3000-3005"
  - "8000:8000"
  - "9090-9091:8080-8081"
  - "49100:22"
  - "127.0.0.1:8001:8001"
  - "127.0.0.1:5000-5010:5000-5010"
  - "6060:6060/udp"

Note: Host IP mapping MAY not be supported on the platform, in such case Compose implementations SHOULD reject the Compose file and MUST inform the user they will ignore the specified host IP.

Long syntax

The long form syntax allows the configuration of additional fields that can’t be expressed in the short form.

ports:
  - target: 80
    host_ip: 127.0.0.1
    published: 8080
    protocol: tcp
    mode: host

  - target: 80
    host_ip: 127.0.0.1
    published: 8000-9000
    protocol: tcp
    mode: host

privileged

privileged configures the service container to run with elevated privileges. Support and actual impacts are platform-specific.

profiles

profiles defines a list of named profiles for the service to be enabled under. When not set, the service is always enabled.

If present, profiles SHOULD follow the regex format of [a-zA-Z0-9][a-zA-Z0-9_.-]+.
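
For example, a minimal sketch enabling a service only when a hypothetical debug profile is active:

services:
  debug-helper:
    image: busybox
    profiles:
      - debug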

pull_policy

pull_policy defines the decisions Compose implementations will make when it starts to pull images. Possible values are:

If pull_policy and build are both present, Compose implementations SHOULD build the image by default. Compose implementations MAY override this behavior in the toolchain.
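
For instance, always forces the image to be pulled on every run (a minimal sketch; refer to the specification for the full list of values):

pull_policy: always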

read_only

read_only configures service container to be created with a read-only filesystem.
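
For example:

read_only: true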

restart

restart defines the policy that the platform will apply on container termination.

    restart: "no"
+    restart: always
+    restart: on-failure
+    restart: unless-stopped
+

runtime

runtime specifies which runtime to use for the service’s containers.

The value of runtime is specific to implementation. For example, runtime can be the name of an implementation of OCI Runtime Spec, such as “runc”.

web:
  image: busybox:latest
  command: true
  runtime: runc

scale

DEPRECATED: use deploy/replicas

scale specifies the default number of containers to deploy for this service.

secrets

secrets grants access to sensitive data defined by secrets on a per-service basis. Two different syntax variants are supported: the short syntax and the long syntax.

Compose implementations MUST report an error if the secret doesn’t exist on the platform or isn’t defined in the secrets section of this Compose file.

Short syntax

The short syntax variant only specifies the secret name. This grants the container access to the secret and mounts it as read-only to /run/secrets/<secret_name> within the container. The source name and destination mountpoint are both set to the secret name.

The following example uses the short syntax to grant the frontend service access to the server-certificate secret. The value of server-certificate is set to the contents of the file ./server.cert.

services:
  frontend:
    image: awesome/webapp
    secrets:
      - server-certificate
secrets:
  server-certificate:
    file: ./server.cert

Long syntax

The long syntax provides more granularity in how the secret is created within the service’s containers.

The following example sets the name of the server-certificate secret file to server.crt within the container, sets the mode to 0440 (group-readable) and sets the user and group to 103. The value of the server-certificate secret is provided by the platform through a lookup, and the secret lifecycle is not directly managed by the Compose implementation.

services:
  frontend:
    image: awesome/webapp
    secrets:
      - source: server-certificate
        target: server.cert
        uid: "103"
        gid: "103"
        mode: 0440
secrets:
  server-certificate:
    external: true

Services MAY be granted access to multiple secrets. Long and short syntax for secrets MAY be used in the same Compose file. Defining a secret in the top-level secrets MUST NOT imply granting any service access to it. Such a grant must be explicit within the service specification, as a secrets service element.

security_opt

security_opt overrides the default labeling scheme for each container.

security_opt:
  - label:user:USER
  - label:role:ROLE

shm_size

shm_size configures the size of the shared memory (/dev/shm partition on Linux) allowed by the service container. Specified as a byte value.
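
For example, a sketch with an arbitrary byte value:

shm_size: '2gb'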

stdin_open

stdin_open configures service containers to run with an allocated stdin.

stop_grace_period

stop_grace_period specifies how long the Compose implementation MUST wait when attempting to stop a container if it doesn’t handle SIGTERM (or whichever stop signal has been specified with stop_signal), before sending SIGKILL. Specified as a duration.

stop_grace_period: 1s
stop_grace_period: 1m30s

Default value is 10 seconds for the container to exit before sending SIGKILL.

stop_signal

stop_signal defines the signal that the Compose implementation MUST use to stop the service containers. If unset, containers are stopped by the Compose implementation by sending SIGTERM.

stop_signal: SIGUSR1
+

storage_opt

storage_opt defines storage driver options for a service.

storage_opt:
+  size: '1G'
+

sysctls

sysctls defines kernel parameters to set in the container. sysctls can use either an array or a map.

sysctls:
+  net.core.somaxconn: 1024
+  net.ipv4.tcp_syncookies: 0
+
sysctls:
+  - net.core.somaxconn=1024
+  - net.ipv4.tcp_syncookies=0
+

You can only use sysctls that are namespaced in the kernel. Docker does not support changing sysctls inside a container that also modify the host system. For an overview of supported sysctls, refer to configure namespaced kernel parameters (sysctls) at runtime.

tmpfs

tmpfs mounts a temporary file system inside the container. Can be a single value or a list.

tmpfs: /run
+
tmpfs:
+  - /run
+  - /tmp
+

tty

tty configures the service container to run with a TTY.

ulimits

ulimits overrides the default ulimits for a container. They can be specified either as a single limit (an integer) or as soft/hard limits (a mapping).

ulimits:
+  nproc: 65535
+  nofile:
+    soft: 20000
+    hard: 40000
+

user

user overrides the user used to run the container process. The default is the user set by the image (i.e. Dockerfile USER) or, if not set, root.
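
An illustrative sketch (the UID and GID values are made up):

services:
+  frontend:
+    image: awesome/webapp
+    user: "1000:1000"
+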

userns_mode

userns_mode sets the user namespace for the service. Supported values are platform specific and MAY depend on platform configuration.

userns_mode: "host"
+

volumes

volumes defines mount host paths or named volumes that MUST be accessible by service containers.

If the mount is a host path and only used by a single service, it MAY be declared as part of the service definition instead of the top-level volumes key.

To reuse a volume across multiple services, a named volume MUST be declared in the top-level volumes key.

This example shows a named volume (db-data) being used by the backend service, and a bind mount defined for a single service.

services:
+  backend:
+    image: awesome/backend
+    volumes:
+      - type: volume
+        source: db-data
+        target: /data
+        volume:
+          nocopy: true
+      - type: bind
+        source: /var/run/postgres/postgres.sock
+        target: /var/run/postgres/postgres.sock
+
+volumes:
+  db-data:
+

Short syntax

The short syntax uses a single string with colon-separated values to specify a volume mount (VOLUME:CONTAINER_PATH), or an access mode (VOLUME:CONTAINER_PATH:ACCESS_MODE).
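
An illustrative sketch of the short syntax (the paths and volume name are made up):

services:
+  backend:
+    image: awesome/backend
+    volumes:
+      - db-data:/data
+      - ./static:/var/www/static:ro
+
+volumes:
+  db-data:
+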

Note: The SELinux re-labeling bind mount option is ignored on platforms without SELinux.

Note: Relative host paths MUST only be supported by Compose implementations that deploy to a local container runtime. This is because the relative path is resolved from the Compose file’s parent directory which is only applicable in the local case. Compose Implementations deploying to a non-local platform MUST reject Compose files which use relative host paths with an error. To avoid ambiguities with named volumes, relative paths SHOULD always begin with . or ...

Long syntax

The long form syntax allows the configuration of additional fields that can’t be expressed in the short form.

volumes_from

volumes_from mounts all of the volumes from another service or container, optionally specifying read-only access (ro) or read-write (rw). If no access level is specified, then read-write MUST be used.

String value defines another service in the Compose application model to mount volumes from. The container: prefix, if supported, allows mounting volumes from a container that is not managed by the Compose implementation.

volumes_from:
+  - service_name
+  - service_name:ro
+  - container:container_name
+  - container:container_name:rw
+

working_dir

working_dir overrides the container’s working directory from that specified by image (i.e. Dockerfile WORKDIR).
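
An illustrative sketch (the path is made up):

services:
+  backend:
+    image: awesome/backend
+    working_dir: /app
+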

Networks top-level element

Networks are the layer that allow services to communicate with each other. The networking model exposed to a service is limited to a simple IP connection with target services and external resources, while the Network definition allows fine-tuning the actual implementation provided by the platform.

Networks can be created by specifying the network name under a top-level networks section. Services can connect to networks by specifying the network name under the service networks subsection.

In the following example, at runtime, networks front-tier and back-tier will be created and the frontend service connected to the front-tier network and the back-tier network.

services:
+  frontend:
+    image: awesome/webapp
+    networks:
+      - front-tier
+      - back-tier
+
+networks:
+  front-tier:
+  back-tier:
+

driver

driver specifies which driver should be used for this network. Compose implementations MUST return an error if the driver is not available on the platform.

driver: overlay
+

Default and available values are platform specific. The Compose specification MUST support the following specific drivers: none and host.

host or none

The syntax for using built-in networks such as host and none is different, as such networks implicitly exist outside the scope of the Compose implementation. To use them, one MUST define an external network with the name host or none and an alias that the Compose implementation can use (hostnet or nonet in the following examples), then grant the service access to that network using its alias.

services:
+  web:
+    networks:
+      hostnet: {}
+
+networks:
+  hostnet:
+    external: true
+    name: host
+
services:
+  web:
+    ...
+    networks:
+      nonet: {}
+
+networks:
+  nonet:
+    external: true
+    name: none
+

driver_opts

driver_opts specifies a list of options as key-value pairs to pass to the driver for this network. These options are driver-dependent - consult the driver’s documentation for more information. Optional.

driver_opts:
+  foo: "bar"
+  baz: 1
+

attachable

If attachable is set to true, then standalone containers SHOULD be able to attach to this network, in addition to services. If a standalone container attaches to the network, it can communicate with services and other standalone containers that are also attached to the network.

networks:
+  mynet1:
+    driver: overlay
+    attachable: true
+

enable_ipv6

enable_ipv6 enables IPv6 networking on this network.
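
An illustrative sketch (the network name is made up):

networks:
+  app-net:
+    enable_ipv6: true
+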

ipam

ipam specifies a custom IPAM configuration. This is an object with several properties, each of which is optional.

A full example:

ipam:
+  driver: default
+  config:
+    - subnet: 172.28.0.0/16
+      ip_range: 172.28.5.0/24
+      gateway: 172.28.5.254
+      aux_addresses:
+        host1: 172.28.1.5
+        host2: 172.28.1.6
+        host3: 172.28.1.7
+  options:
+    foo: bar
+    baz: "0"
+

internal

By default, Compose implementations MUST provide external connectivity to networks. internal, when set to true, allows creating an externally isolated network.
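
An illustrative sketch (the network name is made up):

networks:
+  back-tier:
+    internal: true
+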

labels

Add metadata to networks using labels. You can use either an array or a dictionary.

Users SHOULD use reverse-DNS notation to prevent labels from conflicting with those used by other software.

labels:
+  com.example.description: "Financial transaction network"
+  com.example.department: "Finance"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Financial transaction network"
+  - "com.example.department=Finance"
+  - "com.example.label-with-empty-value"
+

Compose implementations MUST set com.docker.compose.project and com.docker.compose.network labels.

external

If set to true, external specifies that this network's lifecycle is maintained outside of that of the application. Compose Implementations SHOULD NOT attempt to create these networks, and SHOULD raise an error if one doesn't exist.

In the example below, proxy is the gateway to the outside world. Instead of attempting to create a network, Compose implementations SHOULD interrogate the platform for an existing network simply called outside and connect the proxy service’s containers to it.

+services:
+  proxy:
+    image: awesome/proxy
+    networks:
+      - outside
+      - default
+  app:
+    image: awesome/app
+    networks:
+      - default
+
+networks:
+  outside:
+    external: true
+

name

name sets a custom name for this network. The name field can be used to reference networks which contain special characters. The name is used as is and will not be scoped with the project name.

networks:
+  network1:
+    name: my-app-net
+

It can also be used in conjunction with the external property to define the platform network that the Compose implementation should retrieve, typically by using a parameter so the Compose file doesn’t need to hard-code runtime specific values:

networks:
+  network1:
+    external: true
+    name: "${NETWORK_ID}"
+

Volumes top-level element

Volumes are persistent data stores implemented by the platform. The Compose specification offers a neutral abstraction for services to mount volumes, and configuration parameters to allocate them on infrastructure.

The volumes section allows the configuration of named volumes that can be reused across multiple services. Here’s an example of a two-service setup where a database’s data directory is shared with another service as a volume named db-data so that it can be periodically backed up:

services:
+  backend:
+    image: awesome/database
+    volumes:
+      - db-data:/etc/data
+
+  backup:
+    image: backup-service
+    volumes:
+      - db-data:/var/lib/backup/data
+
+volumes:
+  db-data:
+

An entry under the top-level volumes key can be empty, in which case it uses the platform’s default configuration for creating a volume. Optionally, you can configure it with the following keys:

driver

Specify which volume driver should be used for this volume. Default and available values are platform specific. If the driver is not available, the Compose implementation MUST return an error and stop application deployment.

driver: foobar
+

driver_opts

driver_opts specifies a list of options as key-value pairs to pass to the driver for this volume. Those options are driver-dependent.

volumes:
+  example:
+    driver_opts:
+      type: "nfs"
+      o: "addr=10.40.0.199,nolock,soft,rw"
+      device: ":/docker/example"
+

external

If set to true, external specifies that this volume already exists on the platform and its lifecycle is managed outside of that of the application. Compose implementations MUST NOT attempt to create these volumes, and MUST return an error if they do not exist.

In the example below, instead of attempting to create a volume called {project_name}_db-data, Compose looks for an existing volume simply called db-data and mounts it into the backend service’s containers.

services:
+  backend:
+    image: awesome/database
+    volumes:
+      - db-data:/etc/data
+
+volumes:
+  db-data:
+    external: true
+

labels

labels are used to add metadata to volumes. You can use either an array or a dictionary.

It’s recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

labels:
+  com.example.description: "Database volume"
+  com.example.department: "IT/Ops"
+  com.example.label-with-empty-value: ""
+
labels:
+  - "com.example.description=Database volume"
+  - "com.example.department=IT/Ops"
+  - "com.example.label-with-empty-value"
+

Compose implementations MUST set com.docker.compose.project and com.docker.compose.volume labels.

name

name sets a custom name for this volume. The name field can be used to reference volumes that contain special characters. The name is used as is and will not be scoped with the stack name.

volumes:
+  data:
+    name: "my-app-data"
+

It can also be used in conjunction with the external property. In that case, the name of the volume used to look up the actual volume on the platform is set separately from the name used to refer to it within the Compose file:

volumes:
+  db-data:
+    external:
+      name: actual-name-of-volume
+

This makes it possible to turn this lookup name into a parameter of the Compose file, so that the model ID for the volume is hard-coded but the actual volume ID on the platform is set at runtime during deployment:

volumes:
+  db-data:
+    external:
+      name: ${DATABASE_VOLUME}
+

Configs top-level element

Configs allow services to adapt their behaviour without the need to rebuild a Docker image. From a service's point of view, configs are comparable to volumes, in that they are mounted into the service's container filesystem. The actual implementation detail to get the configuration provided by the platform can be set from the Configuration definition.

When granted access to a config, the config content is mounted as a file in the container. The location of the mount point within the container defaults to /<config-name> in Linux containers and C:\<config-name> in Windows containers.

By default, the config MUST be owned by the user running the container command but can be overridden by service configuration. By default, the config MUST have world-readable permissions (mode 0444), unless the service is configured to override this.

Services can only access configs when explicitly granted by a configs subsection.

The top-level configs declaration defines or references configuration data that can be granted to the services in this application. The source of the config is either file or external.

In this example, http_config is created (as <project_name>_http_config) when the application is deployed, and my_second_config MUST already exist on Platform and value will be obtained by lookup.

In this example, http_config is created as <project_name>_http_config when the application is deployed, by registering the content of httpd.conf as configuration data.

configs:
+  http_config:
+    file: ./httpd.conf
+

Alternatively, http_config can be declared as external. In that case, the Compose implementation will look up http_config to expose configuration data to the relevant services.

configs:
+  http_config:
+    external: true
+

External configs lookups can also use a distinct key by specifying a name. The following example modifies the previous one to look up the config using the parameter HTTP_CONFIG_KEY. The actual lookup key is then set at deployment time by variable interpolation, but exposed to containers as the hard-coded ID http_config.

configs:
+  http_config:
+    external: true
+    name: "${HTTP_CONFIG_KEY}"
+

The Compose file needs to explicitly grant access to the configs to the relevant services in the application.

Secrets top-level element

Secrets are a flavour of Configs focusing on sensitive data, with specific constraints for this usage. As the platform implementation may differ significantly from Configs, a dedicated Secrets section allows configuring the related resources.

The top-level secrets declaration defines or references sensitive data that can be granted to the services in this application. The source of the secret is either file or external.

In this example, server-certificate is created as <project_name>_server-certificate when the application is deployed, by registering content of the server.cert as a platform secret.

secrets:
+  server-certificate:
+    file: ./server.cert
+

Alternatively, server-certificate can be declared as external. In that case, the Compose implementation will look up server-certificate to expose the secret to the relevant services.

secrets:
+  server-certificate:
+    external: true
+

External secrets lookups can also use a distinct key by specifying a name. The following example modifies the previous one to look up the secret using the parameter CERTIFICATE_KEY. The actual lookup key is then set at deployment time by variable interpolation, but exposed to containers as the hard-coded ID server-certificate.

secrets:
+  server-certificate:
+    external: true
+    name: "${CERTIFICATE_KEY}"
+

The Compose file needs to explicitly grant access to the secrets to the relevant services in the application.

Fragments

It is possible to re-use configuration fragments using YAML anchors.

volumes:
+  db-data: &default-volume
+    driver: default
+  metrics: *default-volume
+

In the previous sample, an anchor named default-volume is created from the db-data volume specification. It is later reused via the alias *default-volume to define the metrics volume. The same logic can apply to any element in a Compose file. Anchor resolution MUST take place before variables interpolation, so variables can't be used to set anchors or aliases.

It is also possible to partially override values set by an anchor reference using the YAML merge type. In the following example, the metrics volume specification uses the alias to avoid repetition but overrides the name attribute:

+services:
+  backend:
+    image: awesome/database
+    volumes:
+      - db-data
+      - metrics
+volumes:
+  db-data: &default-volume
+    driver: default
+    name: "data"
+  metrics:
+    <<: *default-volume
+    name: "metrics"
+

Extension

Special extension fields can be of any format as long as their name starts with the x- character sequence. They can be used within any structure in a Compose file. This is the sole exception where Compose implementations silently ignore unrecognized fields.

x-custom:
+  foo:
+    - bar
+    - zot
+
+services:
+  webapp:
+    image: awesome/webapp
+    x-foo: bar
+

The contents of such fields are unspecified by the Compose specification, and can be used to enable custom features. A Compose implementation that encounters an unknown extension field MUST NOT fail, but COULD warn about the unknown field.

For platform extensions, it is highly recommended to prefix extension fields with the platform/vendor name, the same way browsers add support for custom CSS features.

services:
+  backend:
+    deploy:
+      placement:
+        x-aws-role: "arn:aws:iam::XXXXXXXXXXXX:role/foo"
+        x-aws-region: "eu-west-3"
+        x-azure-region: "france-central"
+

Informative Historical Notes

This section is informative. At the time of writing, the following prefixes are known to exist:

prefix        vendor/organization
docker        Docker
kubernetes    Kubernetes

Using extensions as fragments

With the support for extension fields, a Compose file can be written as follows to improve the readability of reused fragments:

x-logging: &default-logging
+  options:
+    max-size: "12m"
+    max-file: "5"
+  driver: json-file
+
+services:
+  frontend:
+    image: awesome/webapp
+    logging: *default-logging
+  backend:
+    image: awesome/database
+    logging: *default-logging
+

Specifying byte values

A byte value is expressed as a string in {amount}{byte unit} format. The supported units are b (bytes), k or kb (kilobytes), m or mb (megabytes) and g or gb (gigabytes).

    2b
+    1024kb
+    2048k
+    300m
+    1gb
+

Specifying durations

A duration is expressed as a string in the form of {value}{unit}. The supported units are us (microseconds), ms (milliseconds), s (seconds), m (minutes) and h (hours). Values can combine multiple units without a separator.

  10ms
+  40s
+  1m30s
+  1h5m30s20ms
+

Interpolation

Values in a Compose file can be set by variables, and interpolated at runtime. Compose files use a Bash-like syntax, ${VARIABLE}.

Both $VARIABLE and ${VARIABLE} syntax are supported. Default values can be defined inline using typical shell syntax: ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment, and ${VARIABLE-default} evaluates to default only if VARIABLE is unset.

Similarly, the following syntax allows you to specify mandatory variables: ${VARIABLE:?err} exits with an error message containing err if VARIABLE is unset or empty, and ${VARIABLE?err} exits with an error message containing err if VARIABLE is unset.

Interpolation can also be nested, for example ${VARIABLE:-${FOO}}.
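
An illustrative sketch combining a default value and a mandatory variable (the service name, variable names and ports are made up):

web:
+  image: "webapp:${TAG:-latest}"
+  ports:
+    - "${WEB_PORT:?WEB_PORT must be set}:80"
+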

Other extended shell-style features, such as ${VARIABLE/foo/bar}, are not supported by the Compose specification.

You can use a $$ (double-dollar sign) when your configuration needs a literal dollar sign. This also prevents Compose from interpolating a value, so a $$ allows you to refer to environment variables that you don’t want processed by Compose.

web:
+  build: .
+  command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE"
+

If the Compose implementation can’t resolve a substituted variable and no default value is defined, it MUST warn the user and substitute the variable with an empty string.

As any values in a Compose file can be interpolated with variable substitution, including compact string notation for complex elements, interpolation MUST be applied before merge on a per-file-basis.

diff --git a/devdocs/docker/compose%2Fenv-file%2Findex.html b/devdocs/docker/compose%2Fenv-file%2Findex.html new file mode 100644 index 00000000..c0abc37d --- /dev/null +++ b/devdocs/docker/compose%2Fenv-file%2Findex.html @@ -0,0 +1,11 @@ +

Declare default environment variables in file


Compose supports declaring default environment variables in an environment file named .env placed in the project directory. Docker Compose versions earlier than 1.28 load the .env file from the current working directory, where the command is executed, or from the project directory if this is explicitly set with the --project-directory option. This inconsistency has been addressed starting with v1.28 by limiting the default .env file path to the project directory. You can use the --env-file command line option to override the default .env and specify the path to a custom environment file.

The project directory is determined by the following order of precedence: the --project-directory flag, the folder of the first --file flag, and finally the current directory.

Syntax rules

The following syntax rules apply to the .env file:
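
As a brief sketch: each line is expected to be in VAR=VAL format, lines beginning with # are treated as comments, and blank lines are ignored. An illustrative .env file (the variable names are made up):

# this line is a comment
+TAG=v1.5
+DB_PASSWORD=example
+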

Compose file and CLI variables

The environment variables you define here are used for variable substitution in your Compose file, and can also be used to define Compose CLI variables such as COMPOSE_PROJECT_NAME and COMPOSE_FILE.

Notes

diff --git a/devdocs/docker/compose%2Fenvironment-variables%2Findex.html b/devdocs/docker/compose%2Fenvironment-variables%2Findex.html new file mode 100644 index 00000000..48734964 --- /dev/null +++ b/devdocs/docker/compose%2Fenvironment-variables%2Findex.html @@ -0,0 +1,95 @@ +

Environment variables in Compose


There are multiple parts of Compose that deal with environment variables in one sense or another. This page should help you find the information you need.

Substitute environment variables in Compose files

It’s possible to use environment variables in your shell to populate values inside a Compose file:

web:
+  image: "webapp:${TAG}"
+

If you have multiple environment variables, you can substitute them by adding them to a default environment variable file named .env or by providing a path to your environment variables file using the --env-file command line option.

Your configuration options can contain environment variables. Compose uses the variable values from the shell environment in which docker-compose is run. For example, suppose the shell contains POSTGRES_VERSION=9.3 and you supply this configuration:

db:
+  image: "postgres:${POSTGRES_VERSION}"
+

When you run docker-compose up with this configuration, Compose looks for the POSTGRES_VERSION environment variable in the shell and substitutes its value in. For this example, Compose resolves the image to postgres:9.3 before running the configuration.

If an environment variable is not set, Compose substitutes with an empty string. In the example above, if POSTGRES_VERSION is not set, the value for the image option is postgres:.

You can set default values for environment variables using a .env file, which Compose automatically looks for in the project directory (the parent folder of your Compose file). Values set in the shell environment override those set in the .env file.

Note when using docker stack deploy

The .env file feature only works when you use the docker-compose up command and does not work with docker stack deploy.

Both $VARIABLE and ${VARIABLE} syntax are supported. Additionally, when using the 2.1 file format, it is possible to provide inline default values using typical shell syntax: ${VARIABLE:-default} evaluates to default if VARIABLE is unset or empty in the environment, and ${VARIABLE-default} evaluates to default only if VARIABLE is unset.

Similarly, the following syntax allows you to specify mandatory variables: ${VARIABLE:?err} exits with an error message containing err if VARIABLE is unset or empty, and ${VARIABLE?err} exits with an error message containing err if VARIABLE is unset.

Other extended shell-style features, such as ${VARIABLE/foo/bar}, are not supported.

You can use a $$ (double-dollar sign) when your configuration needs a literal dollar sign. This also prevents Compose from interpolating a value, so a $$ allows you to refer to environment variables that you don’t want processed by Compose.

web:
+  build: .
+  command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE"
+

If you forget and use a single dollar sign ($), Compose interprets the value as an environment variable and warns you:

The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string.
+

The “.env” file

You can set default values for any environment variables referenced in the Compose file, or used to configure Compose, in an environment file named .env placed in the project directory.

$ cat .env
+TAG=v1.5
+
+$ cat docker-compose.yml
+version: '3'
+services:
+  web:
+    image: "webapp:${TAG}"
+

When you run docker-compose up, the web service defined above uses the image webapp:v1.5. You can verify this with the config command, which prints your resolved application config to the terminal:

$ docker-compose config
+
+version: '3'
+services:
+  web:
+    image: 'webapp:v1.5'
+

Values in the shell take precedence over those specified in the .env file.

If you set TAG to a different value in your shell, the substitution in image uses that instead:

$ export TAG=v2.0
+$ docker-compose config
+
+version: '3'
+services:
+  web:
+    image: 'webapp:v2.0'
+

You can override the environment file path using a command line argument --env-file.

Using the “--env-file” option

By passing the file as an argument, you can store it anywhere and name it appropriately, for example, .env.ci, .env.dev, .env.prod. Passing the file path is done using the --env-file option:

$ docker-compose --env-file ./config/.env.dev up 
+

This file path is relative to the current working directory where the Docker Compose command is executed.

$ cat .env
+TAG=v1.5
+
+$ cat ./config/.env.dev
+TAG=v1.6
+
+
+$ cat docker-compose.yml
+version: '3'
+services:
+  web:
+    image: "webapp:${TAG}"
+

The .env file is loaded by default:

$ docker-compose config 
+version: '3'
+services:
+  web:
+    image: 'webapp:v1.5'
+

Passing the --env-file argument overrides the default file path:

$ docker-compose --env-file ./config/.env.dev config 
+version: '3'
+services:
+  web:
+    image: 'webapp:v1.6'
+

When an invalid file path is being passed as --env-file argument, Compose returns an error:

$ docker-compose --env-file ./doesnotexist/.env.dev  config
+ERROR: Couldn't find env file: /home/user/./doesnotexist/.env.dev
+

For more information, see the Variable substitution section in the Compose file reference.

Set environment variables in containers

You can set environment variables in a service’s containers with the ‘environment’ key, just like with docker run -e VARIABLE=VALUE ...:

web:
+  environment:
+    - DEBUG=1
+

Pass environment variables to containers

You can pass environment variables from your shell straight through to a service’s containers with the ‘environment’ key by not giving them a value, just like with docker run -e VARIABLE ...:

web:
+  environment:
+    - DEBUG
+

The value of the DEBUG variable in the container is taken from the value for the same variable in the shell in which Compose is run.

The “env_file” configuration option

You can pass multiple environment variables from an external file through to a service’s containers with the ‘env_file’ option, just like with docker run --env-file=FILE ...:

web:
+  env_file:
+    - web-variables.env
+

Set environment variables with ‘docker-compose run’

Similar to docker run -e, you can set environment variables on a one-off container with docker-compose run -e:

$ docker-compose run -e DEBUG=1 web python console.py
+

You can also pass a variable from the shell by not giving it a value:

$ docker-compose run -e DEBUG web python console.py
+

The value of the DEBUG variable in the container is taken from the value for the same variable in the shell in which Compose is run.

When you set the same environment variable in multiple files, here’s the priority used by Compose to choose which value to use:

  1. Compose file
  2. Shell environment variables
  3. Environment file
  4. Dockerfile
  5. Variable is not defined

In the example below, we set the same environment variable in an environment file and in the Compose file:

$ cat ./Docker/api/api.env
+NODE_ENV=test
+
+$ cat docker-compose.yml
+version: '3'
+services:
+  api:
+    image: 'node:6-alpine'
+    env_file:
+     - ./Docker/api/api.env
+    environment:
+     - NODE_ENV=production
+

When you run the container, the environment variable defined in the Compose file takes precedence.

$ docker-compose exec api node
+
+> process.env.NODE_ENV
+'production'
+

Having any ARG or ENV setting in a Dockerfile evaluates only if there is no Docker Compose entry for environment or env_file.

Specifics for NodeJS containers

If you have a package.json entry for script:start like NODE_ENV=test node server.js, then this overrules any setting in your docker-compose.yml file.

Configure Compose using environment variables

Several environment variables are available for you to configure the Docker Compose command-line behavior. They begin with COMPOSE_ or DOCKER_, and are documented in CLI Environment Variables.

diff --git a/devdocs/docker/compose%2Fextends%2Findex.html b/devdocs/docker/compose%2Fextends%2Findex.html new file mode 100644 index 00000000..fa8c4154 --- /dev/null +++ b/devdocs/docker/compose%2Fextends%2Findex.html @@ -0,0 +1,192 @@ +

Share Compose configurations between files and projects


Compose supports two methods of sharing common configuration:

  1. Extending an entire Compose file by using multiple Compose files
  2. Extending individual services with the extends field (for Compose file versions up to 2.1)

Multiple Compose files

Using multiple Compose files enables you to customize a Compose application for different environments or different workflows.

Understanding multiple Compose files

By default, Compose reads two files, a docker-compose.yml and an optional docker-compose.override.yml file. By convention, the docker-compose.yml contains your base configuration. The override file, as its name implies, can contain configuration overrides for existing services or entirely new services.

If a service is defined in both files, Compose merges the configurations using the rules described in Adding and overriding configuration.

To use multiple override files, or an override file with a different name, you can use the -f option to specify the list of files. Compose merges files in the order they’re specified on the command line. See the docker-compose command reference for more information about using -f.

When you use multiple configuration files, you must make sure all paths in the files are relative to the base Compose file (the first Compose file specified with -f). This is required because override files need not be valid Compose files. Override files can contain small fragments of configuration. Tracking which fragment of a service is relative to which path is difficult and confusing, so to keep paths easier to understand, all paths must be defined relative to the base file.

Example use case

In this section, there are two common use cases for multiple Compose files: changing a Compose app for different environments, and running administrative tasks against a Compose app.

Different environments

A common use case for multiple files is changing a development Compose app for a production-like environment (which may be production, staging or CI). To support these differences, you can split your Compose configuration into a few different files:

Start with a base file that defines the canonical configuration for the services.

docker-compose.yml

web:
+  image: example/my_web_app:latest
+  depends_on:
+    - db
+    - cache
+
+db:
+  image: postgres:latest
+
+cache:
+  image: redis:latest
+

In this example the development configuration exposes some ports to the host, mounts our code as a volume, and builds the web image.

docker-compose.override.yml

web:
+  build: .
+  volumes:
+    - '.:/code'
+  ports:
+    - 8883:80
+  environment:
+    DEBUG: 'true'
+
+db:
+  command: '-d'
+  ports:
+    - 5432:5432
+
+cache:
+  ports:
+    - 6379:6379
+

When you run docker-compose up it reads the overrides automatically.

Now, it would be nice to use this Compose app in a production environment. So, create another override file (which might be stored in a different git repo or managed by a different team).

docker-compose.prod.yml

web:
+  ports:
+    - 80:80
+  environment:
+    PRODUCTION: 'true'
+
+cache:
+  environment:
+    TTL: '500'
+

To deploy with this production Compose file you can run

$ docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d
+

This deploys all three services using the configuration in docker-compose.yml and docker-compose.prod.yml (but not the dev configuration in docker-compose.override.yml).

See production for more information about Compose in production.

Administrative tasks

Another common use case is running adhoc or administrative tasks against one or more services in a Compose app. This example demonstrates running a database backup.

Start with a docker-compose.yml.

web:
+  image: example/my_web_app:latest
+  depends_on:
+    - db
+
+db:
+  image: postgres:latest
+

In a docker-compose.admin.yml add a new service to run the database export or backup.

    dbadmin:
+      build: database_admin/
+      depends_on:
+        - db
+

To start a normal environment run docker-compose up -d. To run a database backup, include the docker-compose.admin.yml as well.

$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml \
+  run dbadmin db-backup
+

Extending services

Note

The extends keyword is supported in earlier Compose file formats up to Compose file version 2.1 (see extends in v2), but is not supported in Compose version 3.x. See the Version 3 summary of keys added and removed, along with information on how to upgrade. See moby/moby#31101 to follow the discussion thread on the possibility of adding support for extends in some form in future versions. The extends keyword has been included in docker-compose versions 1.27 and higher.

Docker Compose’s extends keyword enables the sharing of common configurations among different files, or even different projects entirely. Extending services is useful if you have several services that reuse a common set of configuration options. Using extends you can define a common set of service options in one place and refer to it from anywhere.

Keep in mind that volumes_from and depends_on are never shared between services using extends. These exceptions exist to avoid implicit dependencies; you always define volumes_from locally. This ensures dependencies between services are clearly visible when reading the current file. Defining these locally also ensures that changes to the referenced file don’t break anything.

Understand the extends configuration

When defining any service in docker-compose.yml, you can declare that you are extending another service like this:

services:
+  web:
+    extends:
+      file: common-services.yml
+      service: webapp
+

This instructs Compose to re-use the configuration for the webapp service defined in the common-services.yml file. Suppose that common-services.yml looks like this:

services:
+  webapp:
+    build: .
+    ports:
+      - "8000:8000"
+    volumes:
+      - "/data"
+

In this case, you get exactly the same result as if you wrote docker-compose.yml with the same build, ports and volumes configuration values defined directly under web.

You can go further and define (or re-define) configuration locally in docker-compose.yml:

services:
+  web:
+    extends:
+      file: common-services.yml
+      service: webapp
+    environment:
+      - DEBUG=1
+    cpu_shares: 5
+
+  important_web:
+    extends: web
+    cpu_shares: 10
+

You can also write other services and link your web service to them:

services:
+  web:
+    extends:
+      file: common-services.yml
+      service: webapp
+    environment:
+      - DEBUG=1
+    cpu_shares: 5
+    depends_on:
+      - db
+  db:
+    image: postgres
+

Example use case

Extending an individual service is useful when you have multiple services that have a common configuration. The example below is a Compose app with two services: a web application and a queue worker. Both services use the same codebase and share many configuration options.

In a common.yml we define the common configuration:

services:
+  app:
+    build: .
+    environment:
+      CONFIG_FILE_PATH: /code/config
+      API_KEY: xxxyyy
+    cpu_shares: 5
+

In a docker-compose.yml we define the concrete services which use the common configuration:

services:
+  webapp:
+    extends:
+      file: common.yml
+      service: app
+    command: /code/run_web_app
+    ports:
+      - 8080:8080
+    depends_on:
+      - queue
+      - db
+
+  queue_worker:
+    extends:
+      file: common.yml
+      service: app
+    command: /code/run_worker
+    depends_on:
+      - queue
+

Adding and overriding configuration

Compose copies configurations from the original service over to the local one. If a configuration option is defined in both the original service and the local service, the local value replaces or extends the original value.

For single-value options like image, command or mem_limit, the new value replaces the old value.

original service:

services:
+  myservice:
+    # ...
+    command: python app.py
+

local service:

services:
+  myservice:
+    # ...
+    command: python otherapp.py
+

result:

services:
+  myservice:
+    # ...
+    command: python otherapp.py
+

For the multi-value options ports, expose, external_links, dns, dns_search, and tmpfs, Compose concatenates both sets of values:

original service:

services:
+  myservice:
+    # ...
+    expose:
+      - "3000"
+

local service:

services:
+  myservice:
+    # ...
+    expose:
+      - "4000"
+      - "5000"
+

result:

services:
+  myservice:
+    # ...
+    expose:
+      - "3000"
+      - "4000"
+      - "5000"
+

In the case of environment, labels, volumes, and devices, Compose “merges” entries together with locally-defined values taking precedence. For environment and labels, the environment variable or label name determines which value is used:

original service:

services:
+  myservice:
+    # ...
+    environment:
+      - FOO=original
+      - BAR=original
+

local service:

services:
+  myservice:
+    # ...
+    environment:
+      - BAR=local
+      - BAZ=local
+

result

services:
+  myservice:
+    # ...
+    environment:
+      - FOO=original
+      - BAR=local
+      - BAZ=local
+

Entries for volumes and devices are merged using the mount path in the container:

original service:

services:
+  myservice:
+    # ...
+    volumes:
+      - ./original:/foo
+      - ./original:/bar
+

local service:

services:
+  myservice:
+    # ...
+    volumes:
+      - ./local:/bar
+      - ./local:/baz
+

result:

services:
+  myservice:
+    # ...
+    volumes:
+      - ./original:/foo
+      - ./local:/bar
+      - ./local:/baz
+

diff --git a/devdocs/docker/compose%2Ffaq%2Findex.html b/devdocs/docker/compose%2Ffaq%2Findex.html new file mode 100644 index 00000000..dd913ed2 --- /dev/null +++ b/devdocs/docker/compose%2Ffaq%2Findex.html @@ -0,0 +1,15 @@ +

Frequently asked questions


If you don’t see your question here, feel free to drop by #docker-compose on the Docker Community Slack.

Can I control service startup order?

Yes - see Controlling startup order.

Why do my services take 10 seconds to recreate or stop?

Compose stop attempts to stop a container by sending a SIGTERM. It then waits for a default timeout of 10 seconds. After the timeout, a SIGKILL is sent to the container to forcefully kill it. If you are waiting for this timeout, it means that your containers aren’t shutting down when they receive the SIGTERM signal.

There has already been a lot written about this problem of processes handling signals in containers.

To fix this problem, try the following:

services:
+  web:
+    build: .
+    stop_signal: SIGINT
+

How do I run multiple copies of a Compose file on the same host?

Compose uses the project name to create unique identifiers for all of a project’s containers and other resources. To run multiple copies of a project, set a custom project name using the -p command line option or the COMPOSE_PROJECT_NAME environment variable.
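
For example (the project names are made up), the same Compose file can be run twice under different project names:

$ docker-compose -p myproject1 up -d
+$ COMPOSE_PROJECT_NAME=myproject2 docker-compose up -d
+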

What’s the difference between up, run, and start?

Typically, you want docker-compose up. Use up to start or restart all the services defined in a docker-compose.yml. In the default “attached” mode, you see all the logs from all the containers. In “detached” mode (-d), Compose exits after starting the containers, but the containers continue to run in the background.

The docker-compose run command is for running “one-off” or “adhoc” tasks. It requires the service name you want to run and only starts containers for services that the running service depends on. Use run to run tests or perform an administrative task such as removing or adding data to a data volume container. The run command acts like docker run -ti in that it opens an interactive terminal to the container and returns an exit status matching the exit status of the process in the container.

The docker-compose start command is useful only to restart containers that were previously created, but were stopped. It never creates new containers.

Can I use json instead of yaml for my Compose file?

Yes. Yaml is a superset of json so any JSON file should be valid Yaml. To use a JSON file with Compose, specify the filename to use, for example:

$ docker-compose -f docker-compose.json up
+

Should I include my code with COPY/ADD or a volume?

You can add your code to the image using COPY or ADD directive in a Dockerfile. This is useful if you need to relocate your code along with the Docker image, for example when you’re sending code to another environment (production, CI, etc).

You should use a volume if you want to make changes to your code and see them reflected immediately, for example when you’re developing code and your server supports hot code reloading or live-reload.

There may be cases where you want to use both. You can have the image include the code using a COPY, and use a volume in your Compose file to include the code from the host during development. The volume overrides the directory contents of the image.

Where can I find example compose files?

There are many examples of Compose files on GitHub.

diff --git a/devdocs/docker/compose%2Fgettingstarted%2Findex.html b/devdocs/docker/compose%2Fgettingstarted%2Findex.html new file mode 100644 index 00000000..75ab44ae --- /dev/null +++ b/devdocs/docker/compose%2Fgettingstarted%2Findex.html @@ -0,0 +1,123 @@ +

Get started with Docker Compose


On this page you build a simple Python web application running on Docker Compose. The application uses the Flask framework and maintains a hit counter in Redis. While the sample uses Python, the concepts demonstrated here should be understandable even if you’re not familiar with it.

Prerequisites

Make sure you have already installed both Docker Engine and Docker Compose. You don’t need to install Python or Redis, as both are provided by Docker images.

Step 1: Setup

Define the application dependencies.

  1. Create a directory for the project:

    $ mkdir composetest
    +$ cd composetest
    +
  2. Create a file called app.py in your project directory and paste this in:

    import time
    +
    +import redis
    +from flask import Flask
    +
    +app = Flask(__name__)
    +cache = redis.Redis(host='redis', port=6379)
    +
    +def get_hit_count():
    +    retries = 5
    +    while True:
    +        try:
    +            return cache.incr('hits')
    +        except redis.exceptions.ConnectionError as exc:
    +            if retries == 0:
    +                raise exc
    +            retries -= 1
    +            time.sleep(0.5)
    +
    +@app.route('/')
    +def hello():
    +    count = get_hit_count()
    +    return 'Hello World! I have been seen {} times.\n'.format(count)
    +

    In this example, redis is the hostname of the redis container on the application’s network. We use the default port for Redis, 6379.

    Handling transient errors

    Note the way the get_hit_count function is written. This basic retry loop lets us attempt our request multiple times if the redis service is not available. This is useful at startup while the application comes online, but also makes our application more resilient if the Redis service needs to be restarted anytime during the app’s lifetime. In a cluster, this also helps handling momentary connection drops between nodes.

  3. Create another file called requirements.txt in your project directory and paste this in:

    flask
    +redis
    +

Step 2: Create a Dockerfile

In this step, you write a Dockerfile that builds a Docker image. The image contains all the dependencies the Python application requires, including Python itself.

In your project directory, create a file named Dockerfile and paste the following:

# syntax=docker/dockerfile:1
+FROM python:3.7-alpine
+WORKDIR /code
+ENV FLASK_APP=app.py
+ENV FLASK_RUN_HOST=0.0.0.0
+RUN apk add --no-cache gcc musl-dev linux-headers
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+EXPOSE 5000
+COPY . .
+CMD ["flask", "run"]
+

This tells Docker to:

  - Build an image starting with the Python 3.7 Alpine image.
  - Set the working directory to /code.
  - Set environment variables used by the flask command.
  - Install gcc and other dependencies.
  - Copy requirements.txt and install the Python dependencies.
  - Add metadata to the image to describe that the container is listening on port 5000.
  - Copy the current directory . in the project to the working directory . in the image.
  - Set the default command for the container to flask run.

For more information on how to write Dockerfiles, see the Docker user guide and the Dockerfile reference.

Step 3: Define services in a Compose file

Create a file called docker-compose.yml in your project directory and paste the following:

version: "3.9"
+services:
+  web:
+    build: .
+    ports:
+      - "8000:5000"
+  redis:
+    image: "redis:alpine"
+

This Compose file defines two services: web and redis.

Web service

The web service uses an image that’s built from the Dockerfile in the current directory. It then binds the container and the host machine to the exposed port, 8000. This example service uses the default port for the Flask web server, 5000.

Redis service

The redis service uses a public Redis image pulled from the Docker Hub registry.

Step 4: Build and run your app with Compose

  1. From your project directory, start up your application by running docker-compose up.

    $ docker-compose up
    +
    +Creating network "composetest_default" with the default driver
    +Creating composetest_web_1 ...
    +Creating composetest_redis_1 ...
    +Creating composetest_web_1
    +Creating composetest_redis_1 ... done
    +Attaching to composetest_web_1, composetest_redis_1
    +web_1    |  * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
    +redis_1  | 1:C 17 Aug 22:11:10.480 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
    +redis_1  | 1:C 17 Aug 22:11:10.480 # Redis version=4.0.1, bits=64, commit=00000000, modified=0, pid=1, just started
    +redis_1  | 1:C 17 Aug 22:11:10.480 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
    +web_1    |  * Restarting with stat
    +redis_1  | 1:M 17 Aug 22:11:10.483 * Running mode=standalone, port=6379.
    +redis_1  | 1:M 17 Aug 22:11:10.483 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
    +web_1    |  * Debugger is active!
    +redis_1  | 1:M 17 Aug 22:11:10.483 # Server initialized
    +redis_1  | 1:M 17 Aug 22:11:10.483 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
    +web_1    |  * Debugger PIN: 330-787-903
    +redis_1  | 1:M 17 Aug 22:11:10.483 * Ready to accept connections
    +

    Compose pulls a Redis image, builds an image for your code, and starts the services you defined. In this case, the code is statically copied into the image at build time.

  2. Enter http://localhost:8000/ in a browser to see the application running.

    If you’re using Docker natively on Linux, Docker Desktop for Mac, or Docker Desktop for Windows, then the web app should now be listening on port 8000 on your Docker daemon host. Point your web browser to http://localhost:8000 to find the Hello World message. If this doesn’t resolve, you can also try http://127.0.0.1:8000.

    You should see a message in your browser saying:

    Hello World! I have been seen 1 times.
    +

    hello world in browser

  3. Refresh the page.

    The number should increment.

    Hello World! I have been seen 2 times.
    +

    hello world in browser

  4. Switch to another terminal window, and type docker image ls to list local images.

    Listing images at this point should return redis and web.

    $ docker image ls
    +
    +REPOSITORY        TAG           IMAGE ID      CREATED        SIZE
    +composetest_web   latest        e2c21aa48cc1  4 minutes ago  93.8MB
    +python            3.4-alpine    84e6077c7ab6  7 days ago     82.5MB
    +redis             alpine        9d8fa9aa0e5b  3 weeks ago    27.5MB
    +

    You can inspect images with docker inspect <tag or id>.

  5. Stop the application, either by running docker-compose down from within your project directory in the second terminal, or by hitting CTRL+C in the original terminal where you started the app.

Step 5: Edit the Compose file to add a bind mount

Edit docker-compose.yml in your project directory to add a bind mount for the web service:

version: "3.9"
+services:
+  web:
+    build: .
+    ports:
+      - "8000:5000"
+    volumes:
+      - .:/code
+    environment:
+      FLASK_ENV: development
+  redis:
+    image: "redis:alpine"
+

The new volumes key mounts the project directory (current directory) on the host to /code inside the container, allowing you to modify the code on the fly, without having to rebuild the image. The environment key sets the FLASK_ENV environment variable, which tells flask run to run in development mode and reload the code on change. This mode should only be used in development.

Step 6: Re-build and run the app with Compose

From your project directory, type docker-compose up to build the app with the updated Compose file, and run it.

$ docker-compose up
+
+Creating network "composetest_default" with the default driver
+Creating composetest_web_1 ...
+Creating composetest_redis_1 ...
+Creating composetest_web_1
+Creating composetest_redis_1 ... done
+Attaching to composetest_web_1, composetest_redis_1
+web_1    |  * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)
+...
+

Check the Hello World message in a web browser again, and refresh to see the count increment.

Shared folders, volumes, and bind mounts

Step 7: Update the application

Because the application code is now mounted into the container using a volume, you can make changes to its code and see the changes instantly, without having to rebuild the image.

Change the greeting in app.py and save it. For example, change the Hello World! message to Hello from Docker!:

return 'Hello from Docker! I have been seen {} times.\n'.format(count)
+

Refresh the app in your browser. The greeting should be updated, and the counter should still be incrementing.

hello world in browser

Step 8: Experiment with some other commands

If you want to run your services in the background, you can pass the -d flag (for “detached” mode) to docker-compose up and use docker-compose ps to see what is currently running:

$ docker-compose up -d
+
+Starting composetest_redis_1...
+Starting composetest_web_1...
+
+$ docker-compose ps
+
+       Name                      Command               State           Ports         
+-------------------------------------------------------------------------------------
+composetest_redis_1   docker-entrypoint.sh redis ...   Up      6379/tcp              
+composetest_web_1     flask run                        Up      0.0.0.0:8000->5000/tcp
+

The docker-compose run command allows you to run one-off commands for your services. For example, to see what environment variables are available to the web service:

$ docker-compose run web env
+

See docker-compose --help to see other available commands.

If you started Compose with docker-compose up -d, stop your services once you’ve finished with them:

$ docker-compose stop
+

You can bring everything down, removing the containers entirely, with the down command. Pass --volumes to also remove the data volume used by the Redis container:

$ docker-compose down --volumes
+

At this point, you have seen the basics of how Compose works.

diff --git a/devdocs/docker/compose%2Fgpu-support%2Findex.html b/devdocs/docker/compose%2Fgpu-support%2Findex.html new file mode 100644 index 00000000..02d3394f --- /dev/null +++ b/devdocs/docker/compose%2Fgpu-support%2Findex.html @@ -0,0 +1,119 @@ +

Enabling GPU access with Compose


Compose services can define GPU device reservations if the Docker host contains such devices and the Docker Daemon is set accordingly. For this, make sure to install the prerequisites if you have not already done so.

The examples in the following sections focus specifically on providing service containers access to GPU devices with Docker Compose. You can use either docker-compose or docker compose commands.

Use of service runtime property from Compose v2.3 format (legacy)

Docker Compose v1.27.0+ switched to using the Compose Specification schema, which is a combination of all properties from the 2.x and 3.x versions. This re-enabled the use of the runtime service property to provide GPU access to service containers. However, this does not allow controlling specific properties of the GPU devices.

services:
+  test:
+    image: nvidia/cuda:10.2-base
+    command: nvidia-smi
+    runtime: nvidia
+
+

Enabling GPU access to service containers

Docker Compose v1.28.0+ allows defining GPU reservations using the device structure defined in the Compose Specification. This provides more granular control over a GPU reservation, as custom values can be set for device properties such as capabilities, count, device_ids, driver and options.

Note

You must set the capabilities field. Otherwise, it returns an error on service deployment.

count and device_ids are mutually exclusive. You must only define one field at a time.

For more information on these properties, see the deploy section in the Compose Specification.

Example of a Compose file for running a service with access to 1 GPU device:

services:
+  test:
+    image: nvidia/cuda:10.2-base
+    command: nvidia-smi
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+

Run with Docker Compose:

$ docker-compose up
+Creating network "gpu_default" with the default driver
+Creating gpu_test_1 ... done
+Attaching to gpu_test_1    
+test_1  | +-----------------------------------------------------------------------------+
+test_1  | | NVIDIA-SMI 450.80.02    Driver Version: 450.80.02    CUDA Version: 11.1     |
+test_1  | |-------------------------------+----------------------+----------------------+
+test_1  | | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+test_1  | | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+test_1  | |                               |                      |               MIG M. |
+test_1  | |===============================+======================+======================|
+test_1  | |   0  Tesla T4            On   | 00000000:00:1E.0 Off |                    0 |
+test_1  | | N/A   23C    P8     9W /  70W |      0MiB / 15109MiB |      0%      Default |
+test_1  | |                               |                      |                  N/A |
+test_1  | +-------------------------------+----------------------+----------------------+
+test_1  |                                                                                
+test_1  | +-----------------------------------------------------------------------------+
+test_1  | | Processes:                                                                  |
+test_1  | |  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
+test_1  | |        ID   ID                                                   Usage      |
+test_1  | |=============================================================================|
+test_1  | |  No running processes found                                                 |
+test_1  | +-----------------------------------------------------------------------------+
+gpu_test_1 exited with code 0
+
+

If no count or device_ids are set, all GPUs available on the host are going to be used by default.

services:
+  test:
+    image: tensorflow/tensorflow:latest-gpu
+    command: python -c "import tensorflow as tf;tf.test.gpu_device_name()"
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - capabilities: [gpu]
+
$ docker-compose up
+Creating network "gpu_default" with the default driver
+Creating gpu_test_1 ... done
+Attaching to gpu_test_1
+test_1  | I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
+.....
+test_1  | I tensorflow/core/common_runtime/gpu/gpu_device.cc:1402]
+Created TensorFlow device (/device:GPU:0 with 13970 MB memory) -> physical GPU (device: 0, name: Tesla T4, pci bus id: 0000:00:1e.0, compute capability: 7.5)
+test_1  | /device:GPU:0
+gpu_test_1 exited with code 0
+

On machines hosting multiple GPUs, the device_ids field can be set to target specific GPU devices, and count can be used to limit the number of GPU devices assigned to a service container. If count exceeds the number of available GPUs on the host, the deployment errors out.

$ nvidia-smi   
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 450.80.02    Driver Version: 450.80.02    CUDA Version: 11.0     |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|                               |                      |               MIG M. |
+|===============================+======================+======================|
+|   0  Tesla T4            On   | 00000000:00:1B.0 Off |                    0 |
+| N/A   72C    P8    12W /  70W |      0MiB / 15109MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   1  Tesla T4            On   | 00000000:00:1C.0 Off |                    0 |
+| N/A   67C    P8    11W /  70W |      0MiB / 15109MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   2  Tesla T4            On   | 00000000:00:1D.0 Off |                    0 |
+| N/A   74C    P8    12W /  70W |      0MiB / 15109MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   3  Tesla T4            On   | 00000000:00:1E.0 Off |                    0 |
+| N/A   62C    P8    11W /  70W |      0MiB / 15109MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+

To enable access only to GPU-0 and GPU-3 devices:

services:
+  test:
+    image: tensorflow/tensorflow:latest-gpu
+    command: python -c "import tensorflow as tf;tf.test.gpu_device_name()"
+    deploy:
+      resources:
+        reservations:
+          devices:
+          - driver: nvidia
+            device_ids: ['0', '3']
+            capabilities: [gpu]
+
+
$ docker-compose up
+...
+Created TensorFlow device (/device:GPU:0 with 13970 MB memory -> physical GPU (device: 0, name: Tesla T4, pci bus id: 0000:00:1b.0, compute capability: 7.5)
+...
+Created TensorFlow device (/device:GPU:1 with 13970 MB memory) -> physical GPU (device: 1, name: Tesla T4, pci bus id: 0000:00:1e.0, compute capability: 7.5)
+...
+gpu_test_1 exited with code 0
+
+

diff --git a/devdocs/docker/compose%2Findex.html b/devdocs/docker/compose%2Findex.html
new file mode 100644
index 00000000..b0e61d85
--- /dev/null
+++ b/devdocs/docker/compose%2Findex.html
@@ -0,0 +1,28 @@

Overview of Docker Compose

+ +

Looking for Compose file reference? Find the latest version here.

Compose is a tool for defining and running multi-container Docker applications. With Compose, you use a YAML file to configure your application’s services. Then, with a single command, you create and start all the services from your configuration. To learn more about all the features of Compose, see the list of features.

Compose works in all environments: production, staging, development, testing, as well as CI workflows. You can learn more about each case in Common Use Cases.

Using Compose is basically a three-step process:

  1. Define your app’s environment with a Dockerfile so it can be reproduced anywhere.

  2. Define the services that make up your app in docker-compose.yml so they can be run together in an isolated environment.

  3. Run docker compose up and the Docker compose command starts and runs your entire app. You can alternatively run docker-compose up using the docker-compose binary.

A docker-compose.yml looks like this:

version: "3.9"  # optional since v1.27.0
+services:
+  web:
+    build: .
+    ports:
+      - "8000:5000"
+    volumes:
+      - .:/code
+      - logvolume01:/var/log
+    links:
+      - redis
+  redis:
+    image: redis
+volumes:
+  logvolume01: {}
+

For more information about the Compose file, see the Compose file reference.

Compose has commands for managing the whole lifecycle of your application: starting, stopping, and rebuilding services, viewing the status of running services, streaming their log output, and running one-off commands on a service.
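
A minimal sketch of that lifecycle, assuming the example above is saved as docker-compose.yml in the current directory:

$ docker compose up -d        # create and start the services in the background
$ docker compose ps           # view the status of the running services
$ docker compose logs web     # stream the log output of the web service
$ docker compose stop         # stop the services without removing them
$ docker compose down         # stop and remove the containers and the default network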

Compose V2 and the new docker compose command

Important

The new Compose V2, which supports the compose command as part of the Docker CLI, is now available.

Compose V2 integrates compose functions into the Docker platform, continuing to support most of the previous docker-compose features and flags. You can run Compose V2 by replacing the hyphen (-) with a space, using docker compose, instead of docker-compose.

If you rely on using Docker Compose as docker-compose (with a hyphen), you can set up Compose V2 to act as a drop-in replacement of the previous docker-compose. Refer to the Installing Compose section for detailed instructions.

Context of Docker Compose evolution

The introduction of the Compose specification makes a clean distinction between the Compose YAML file model and the docker-compose implementation. This change has enabled a number of enhancements, including adding the compose command directly into the Docker CLI, being able to “up” a Compose application on cloud platforms by simply switching the Docker context, and the launch of the Amazon ECS and Microsoft ACI integrations. As the Compose specification evolves, new features land faster in the Docker CLI.

Compose V2 relies directly on the compose-go bindings, which are maintained as part of the specification. This allows us to include community proposals and experimental implementations by the Docker CLI and/or Engine, and to deliver features to users faster. Compose V2 also supports some of the newer additions to the specification, such as profiles and GPU devices.

Compose V2 has been re-written in Go, which improves integration with other Docker command-line features, and allows it to run natively on macOS on Apple silicon, Windows, and Linux, without dependencies such as Python.

For more information about compatibility with the compose v1 command-line, see the docker-compose compatibility list.

Features

The features of Compose that make it effective are:

Multiple isolated environments on a single host

Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts: on a development host, to create multiple copies of a single environment (for example, one per feature branch); on a CI server, to keep builds from interfering with each other; or on a shared host, to prevent different projects that use the same service names from interfering with each other.

The default project name is the basename of the project directory. You can set a custom project name by using the -p command line option or the COMPOSE_PROJECT_NAME environment variable.

The default project directory is the base directory of the Compose file. A custom value for it can be defined with the --project-directory command line option.
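
For instance, you could run two isolated copies of the same application from one directory by giving each run its own project name (the project names here are purely illustrative):

$ docker-compose -p featuretest up -d
$ COMPOSE_PROJECT_NAME=mainline docker-compose up -d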

Preserve volume data when containers are created

Compose preserves all volumes used by your services. When docker-compose up runs, if it finds any containers from previous runs, it copies the volumes from the old container to the new container. This process ensures that any data you’ve created in volumes isn’t lost.

If you use docker-compose on a Windows machine, see Environment variables and adjust the necessary environment variables for your specific needs.

Only recreate containers that have changed

Compose caches the configuration used to create a container. When you restart a service that has not changed, Compose re-uses the existing containers. Re-using containers means that you can make changes to your environment very quickly.

Variables and moving a composition between environments

Compose supports variables in the Compose file. You can use these variables to customize your composition for different environments, or different users. See Variable substitution for more details.
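
As a small illustration (the TAG variable and the webapp image name are assumptions, not part of the examples above), a Compose file can reference an environment variable that is substituted at runtime:

services:
  web:
    image: "webapp:${TAG}"

$ TAG=v1.5 docker-compose up -d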

You can extend a Compose file using the extends field or by creating multiple Compose files. See extends for more details.

Common use cases

Compose can be used in many different ways. Some common use cases are outlined below.

Development environments

When you’re developing software, the ability to run an application in an isolated environment and interact with it is crucial. The Compose command line tool can be used to create the environment and interact with it.

The Compose file provides a way to document and configure all of the application’s service dependencies (databases, queues, caches, web service APIs, etc). Using the Compose command line tool you can create and start one or more containers for each dependency with a single command (docker-compose up).

Together, these features provide a convenient way for developers to get started on a project. Compose can reduce a multi-page “developer getting started guide” to a single machine readable Compose file and a few commands.

Automated testing environments

An important part of any Continuous Deployment or Continuous Integration process is the automated test suite. Automated end-to-end testing requires an environment in which to run tests. Compose provides a convenient way to create and destroy isolated testing environments for your test suite. By defining the full environment in a Compose file, you can create and destroy these environments in just a few commands:

$ docker-compose up -d
+$ ./run_tests
+$ docker-compose down
+

Single host deployments

Compose has traditionally been focused on development and testing workflows, but with each release we’re making progress on more production-oriented features.

For details on using production-oriented features, see compose in production in this documentation.

Release notes

To see a detailed list of changes for past and current releases of Docker Compose, refer to the CHANGELOG.

Getting help

Docker Compose is under active development. If you need help, would like to contribute, or simply want to talk about the project with like-minded individuals, we have a number of open channels for communication.

+

diff --git a/devdocs/docker/compose%2Finstall%2Findex.html b/devdocs/docker/compose%2Finstall%2Findex.html
new file mode 100644
index 00000000..f6bafd5c
--- /dev/null
+++ b/devdocs/docker/compose%2Finstall%2Findex.html
@@ -0,0 +1,36 @@

Install Docker Compose

+ +

This page contains information on how to install Docker Compose. You can run Compose on macOS, Windows, and 64-bit Linux.

Prerequisites

Docker Compose relies on Docker Engine for any meaningful work, so make sure you have Docker Engine installed either locally or on a remote machine, depending on your setup.

Install Compose

Follow the instructions below to install Compose on Mac, Windows, Windows Server, or Linux systems.

Install a different version

The instructions below outline installation of the current stable release (v2.5.0) of Compose. To install a different version of Compose, replace the given release number with the one that you want.

Compose releases are also listed and available for direct download on the Compose repository release page on GitHub.

To install the Python version of Compose, follow instructions in the Compose v1 GitHub branch.

Install Compose on macOS

Docker Desktop for Mac includes Compose along with other Docker apps, so Mac users do not need to install Compose separately. For installation instructions, see Install Docker Desktop on Mac.

Install Compose on Windows desktop systems

Docker Desktop for Windows includes Compose along with other Docker apps, so most Windows users do not need to install Compose separately. For install instructions, see Install Docker Desktop on Windows.

If you are running the Docker daemon and client directly on Microsoft Windows Server, follow the instructions in the Windows Server tab.

Install Compose on Windows Server

Follow these instructions if you are running the Docker daemon and client directly on Microsoft Windows Server and want to install Docker Compose.

  1. Start an “elevated” PowerShell (run it as administrator). Search for PowerShell, right-click, and choose Run as administrator. When asked if you want to allow this app to make changes to your device, click Yes.

  2. In PowerShell, since GitHub now requires TLS1.2, run the following:

    [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
    +

    Then run the following command to download the current stable release of Compose (v2.5.0):

    Invoke-WebRequest "https://github.com/docker/compose/releases/download/v2.5.0/docker-compose-Windows-x86_64.exe" -UseBasicParsing -OutFile $Env:ProgramFiles\Docker\docker-compose.exe
    +

    Note

    On Windows Server 2019, you can add the Compose executable to $Env:ProgramFiles\Docker. Because this directory is registered in the system PATH, you can run the docker-compose --version command on the subsequent step with no additional configuration.

    To install a different version of Compose, substitute v2.5.0 with the version of Compose you want to use.

  3. Test the installation.

    $ docker compose version
    +Docker Compose version v2.5.0
    +

Install Compose on Linux systems

You can install Docker Compose in different ways, depending on your needs:

Install using the convenience script

As Docker Compose is now part of the Docker CLI, it can be installed alongside Docker Engine and the CLI via a convenience script.
Choose your Linux distribution and follow the instructions.

Install using the repository

If you have already followed the instructions to install Docker Engine, Docker Compose should already be installed.
Otherwise, you can set up the Docker repository as described in the Docker Engine installation instructions: choose your Linux distribution and go to the Set up the repository section.

When finished

  1. Update the apt package index, and install the latest version of Docker Compose, or go to the next step to install a specific version:

     $ sudo apt-get update
    + $ sudo apt-get install docker-compose-plugin
    +
  2. To install a specific version of the Docker Compose plugin, list the available versions in the repo, then select and install:

    a. List the versions available in your repo:

    $ apt-cache madison docker-compose-plugin
    +
    +  docker-compose-plugin | 2.3.3~ubuntu-focal | https://download.docker.com/linux/ubuntu focal/stable arm64 Packages
    +

    b. Install a specific version using the version string from the second column, for example, 2.3.3~ubuntu-focal.

    $ sudo apt-get install docker-compose-plugin=<VERSION_STRING>
    +
  3. Verify that Docker Compose is installed correctly by checking the version.

    $ docker compose version
    +Docker Compose version v2.3.3
    +

Install the binary manually

On Linux, you can download the Docker Compose binary from the Compose repository release page on GitHub and copy it into $HOME/.docker/cli-plugins as docker-compose. Follow the instructions from the link, which involve running the curl command in your terminal to download the binary. These step-by-step instructions are also included below.

  1. Run this command to download the current stable release of Docker Compose:

    $ DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker}
    +$ mkdir -p $DOCKER_CONFIG/cli-plugins
    +$ curl -SL https://github.com/docker/compose/releases/download/v2.5.0/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose
    +

    This command installs Compose for the active user under the $HOME directory. To install Docker Compose for all users on your system, replace ~/.docker/cli-plugins with /usr/local/lib/docker/cli-plugins.

    To install a different version of Compose, substitute v2.5.0 with the version of Compose you want to use.

  2. Apply executable permissions to the binary:

     $ chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose
    +

    or, if you chose to install Compose for all users:

     $ sudo chmod +x /usr/local/lib/docker/cli-plugins/docker-compose
    +
  3. Test the installation.

     $ docker compose version
    + Docker Compose version v2.5.0
    +

Install Compose as standalone binary on Linux systems

You can use Compose as a standalone binary without installing the Docker CLI.

  1. Run this command to download the current stable release of Docker Compose:
  $ curl -SL https://github.com/docker/compose/releases/download/v2.5.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
+

To install a different version of Compose, substitute v2.5.0 with the version of Compose you want to use.

  2. Apply executable permissions to the binary:
  $ sudo chmod +x /usr/local/bin/docker-compose
+

Note:

If the command docker-compose fails after installation, check your path. You can also create a symbolic link to /usr/bin or any other directory in your path.

For example:

$ sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
+
  3. Test the installation.

     $ docker-compose --version
    + Docker Compose version v2.5.0
    +

Uninstallation

To uninstall Docker Compose if you installed using curl:

$ rm $DOCKER_CONFIG/cli-plugins/docker-compose
+

or, if you chose to install Compose for all users:

$ sudo rm /usr/local/lib/docker/cli-plugins/docker-compose
+

Got a “Permission denied” error?

If you get a “Permission denied” error using either of the above methods, you probably do not have the proper permissions to remove docker-compose. To force the removal, prepend sudo to either of the above commands and run again.

Where to go next

+

diff --git a/devdocs/docker/compose%2Fnetworking%2Findex.html b/devdocs/docker/compose%2Fnetworking%2Findex.html
new file mode 100644
index 00000000..04f9ffc5
--- /dev/null
+++ b/devdocs/docker/compose%2Fnetworking%2Findex.html
@@ -0,0 +1,82 @@

Networking in Compose

+ +

This page applies to Compose file formats version 2 and higher. Networking features are not supported for Compose file version 1 (deprecated).

By default Compose sets up a single network for your app. Each container for a service joins the default network and is both reachable by other containers on that network, and discoverable by them at a hostname identical to the container name.

Note

Your app’s network is given a name based on the “project name”, which is based on the name of the directory it lives in. You can override the project name with either the --project-name flag or the COMPOSE_PROJECT_NAME environment variable.

For example, suppose your app is in a directory called myapp, and your docker-compose.yml looks like this:

version: "3.9"
+services:
+  web:
+    build: .
+    ports:
+      - "8000:8000"
+  db:
+    image: postgres
+    ports:
+      - "8001:5432"
+

When you run docker-compose up, the following happens:

  1. A network called myapp_default is created.
  2. A container is created using web’s configuration. It joins the network myapp_default under the name web.
  3. A container is created using db’s configuration. It joins the network myapp_default under the name db.

In v2.1+, overlay networks are always attachable

Starting in Compose file format 2.1, overlay networks are always created as attachable, and this is not configurable. This means that standalone containers can connect to overlay networks.

In Compose file format 3.x, you can optionally set the attachable property to false.

Each container can now look up the hostname web or db and get back the appropriate container’s IP address. For example, web’s application code could connect to the URL postgres://db:5432 and start using the Postgres database.

It is important to note the distinction between HOST_PORT and CONTAINER_PORT. In the above example, for db, the HOST_PORT is 8001 and the container port is 5432 (postgres default). Networked service-to-service communication uses the CONTAINER_PORT. When HOST_PORT is defined, the service is accessible outside the swarm as well.

Within the web container, your connection string to db would look like postgres://db:5432, and from the host machine, the connection string would look like postgres://{DOCKER_IP}:8001.

Update containers

If you make a configuration change to a service and run docker-compose up to update it, the old container is removed and the new one joins the network under a different IP address but the same name. Running containers can look up that name and connect to the new address, but the old address stops working.

If any containers have connections open to the old container, they are closed. It is a container’s responsibility to detect this condition, look up the name again and reconnect.

Links allow you to define extra aliases by which a service is reachable from another service. They are not required to enable services to communicate - by default, any service can reach any other service at that service’s name. In the following example, db is reachable from web at the hostnames db and database:

version: "3.9"
+services:
+
+  web:
+    build: .
+    links:
+      - "db:database"
+  db:
+    image: postgres
+

See the links reference for more information.

Multi-host networking

When deploying a Compose application on a Docker Engine with Swarm mode enabled, you can make use of the built-in overlay driver to enable multi-host communication.

Consult the Swarm mode section, to see how to set up a Swarm cluster, and the Getting started with multi-host networking to learn about multi-host overlay networks.

Specify custom networks

Instead of just using the default app network, you can specify your own networks with the top-level networks key. This lets you create more complex topologies and specify custom network drivers and options. You can also use it to connect services to externally-created networks which aren’t managed by Compose.

Each service can specify what networks to connect to with the service-level networks key, which is a list of names referencing entries under the top-level networks key.

Here’s an example Compose file defining two custom networks. The proxy service is isolated from the db service, because they do not share a network in common - only app can talk to both.

version: "3.9"
+
+services:
+  proxy:
+    build: ./proxy
+    networks:
+      - frontend
+  app:
+    build: ./app
+    networks:
+      - frontend
+      - backend
+  db:
+    image: postgres
+    networks:
+      - backend
+
+networks:
+  frontend:
+    # Use a custom driver
+    driver: custom-driver-1
+  backend:
+    # Use a custom driver which takes special options
+    driver: custom-driver-2
+    driver_opts:
+      foo: "1"
+      bar: "2"
+

Networks can be configured with static IP addresses by setting the ipv4_address and/or ipv6_address for each attached network.
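
A sketch of what such a configuration can look like (the network name, subnet, and address are illustrative):

version: "3.9"
services:
  app:
    image: nginx:alpine
    networks:
      app_net:
        ipv4_address: 172.16.238.10

networks:
  app_net:
    ipam:
      config:
        - subnet: 172.16.238.0/24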

Networks can also be given a custom name (since version 3.5):

version: "3.9"
+services:
+  # ...
+networks:
+  frontend:
+    name: custom_frontend
+    driver: custom-driver-1
+

For full details of the network configuration options available, see the following references:

Configure the default network

Instead of (or as well as) specifying your own networks, you can also change the settings of the app-wide default network by defining an entry under networks named default:

version: "3.9"
+services:
+  web:
+    build: .
+    ports:
+      - "8000:8000"
+  db:
+    image: postgres
+
+networks:
+  default:
+    # Use a custom driver
+    driver: custom-driver-1
+

Use a pre-existing network

If you want your containers to join a pre-existing network, use the external option:

services:
+  # ...
+networks:
+  default:
+    name: my-pre-existing-network
+    external: true
+

Instead of attempting to create a network called [projectname]_default, Compose looks for a network called my-pre-existing-network and connects your app’s containers to it.

+

diff --git a/devdocs/docker/compose%2Fproduction%2Findex.html b/devdocs/docker/compose%2Fproduction%2Findex.html
new file mode 100644
index 00000000..3b3adfd6
--- /dev/null
+++ b/devdocs/docker/compose%2Fproduction%2Findex.html
@@ -0,0 +1,13 @@

Use Compose in production

+ +

When you define your app with Compose in development, you can use this definition to run your application in different environments such as CI, staging, and production.

The easiest way to deploy an application is to run it on a single server, similar to how you would run your development environment. If you want to scale up your application, you can run Compose apps on a Swarm cluster.

Modify your Compose file for production

You probably need to make changes to your app configuration to make it production-ready. These changes may include removing any volume bindings for application code, binding to different ports on the host, setting environment variables differently, specifying a restart policy to avoid downtime, and adding extra services such as a log aggregator.

For this reason, consider defining an additional Compose file, say production.yml, which specifies production-appropriate configuration. This configuration file only needs to include the changes you’d like to make from the original Compose file. The additional Compose file can be applied over the original docker-compose.yml to create a new configuration.
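
For example, a production.yml overlay might look something like the following sketch (the service name and settings are purely illustrative):

version: "3.9"
services:
  web:
    ports:
      - "80:8000"
    environment:
      - DEBUG=0
    restart: always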

Once you’ve got a second configuration file, tell Compose to use it with the -f option:

$ docker-compose -f docker-compose.yml -f production.yml up -d
+

See Using multiple compose files for a more complete example.

Deploying changes

When you make changes to your app code, remember to rebuild your image and recreate your app’s containers. To redeploy a service called web, use:

$ docker-compose build web
+$ docker-compose up --no-deps -d web
+

This first rebuilds the image for web and then stops, destroys, and recreates just the web service. The --no-deps flag prevents Compose from also recreating any services which web depends on.

Running Compose on a single server

You can use Compose to deploy an app to a remote Docker host by setting the DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH environment variables appropriately.

Once you’ve set up your environment variables, all the normal docker-compose commands work with no further configuration.

Compose documentation

+

diff --git a/devdocs/docker/compose%2Fprofiles%2Findex.html b/devdocs/docker/compose%2Fprofiles%2Findex.html
new file mode 100644
index 00000000..04f317b4
--- /dev/null
+++ b/devdocs/docker/compose%2Fprofiles%2Findex.html
@@ -0,0 +1,88 @@

Using profiles with Compose

+ +

Profiles allow adjusting the Compose application model for various usages and environments by selectively enabling services. This is achieved by assigning each service to zero or more profiles. If unassigned, the service is always started but if assigned, it is only started if the profile is activated.

This allows one to define additional services in a single docker-compose.yml file that should only be started in specific scenarios, e.g. for debugging or development tasks.

Assigning profiles to services

Services are associated with profiles through the profiles attribute which takes an array of profile names:

version: "3.9"
+services:
+  frontend:
+    image: frontend
+    profiles: ["frontend"]
+
+  phpmyadmin:
+    image: phpmyadmin
+    depends_on:
+      - db
+    profiles:
+      - debug
+
+  backend:
+    image: backend
+
+  db:
+    image: mysql
+

Here the services frontend and phpmyadmin are assigned to the profiles frontend and debug respectively and as such are only started when their respective profiles are enabled.

Services without a profiles attribute will always be enabled, i.e. in this case running docker-compose up would only start backend and db.

Valid profile names follow the regex format of [a-zA-Z0-9][a-zA-Z0-9_.-]+.

Note

The core services of your application should not be assigned profiles so they will always be enabled and automatically started.

Enabling profiles

To enable a profile supply the --profile command-line option or use the COMPOSE_PROFILES environment variable:

$ docker-compose --profile debug up
+$ COMPOSE_PROFILES=debug docker-compose up
+

Both of the above commands would start your application with the debug profile enabled. Using the docker-compose.yml file above, this would start the services backend, db and phpmyadmin.

Multiple profiles can be specified by passing multiple --profile flags or a comma-separated list for the COMPOSE_PROFILES environment variable:

$ docker-compose --profile frontend --profile debug up
+$ COMPOSE_PROFILES=frontend,debug docker-compose up
+

Auto-enabling profiles and dependency resolution

When a service with assigned profiles is explicitly targeted on the command line its profiles will be enabled automatically so you don’t need to enable them manually. This can be used for one-off services and debugging tools. As an example consider this configuration:

version: "3.9"
+services:
+  backend:
+    image: backend
+
+  db:
+    image: mysql
+
+  db-migrations:
+    image: backend
+    command: myapp migrate
+    depends_on:
+      - db
+    profiles:
+      - tools
+
# will only start backend and db
+$ docker-compose up -d
+
+# this will run db-migrations (and - if necessary - start db)
+# by implicitly enabling profile `tools`
+$ docker-compose run db-migrations
+

But keep in mind that docker-compose will only automatically enable the profiles of the services on the command line and not of any dependencies. This means that all services the targeted service depends_on must have a common profile with it, be always enabled (by omitting profiles) or have a matching profile enabled explicitly:

version: "3.9"
+services:
+  web:
+    image: web
+
+  mock-backend:
+    image: backend
+    profiles: ["dev"]
+    depends_on:
+      - db
+
+  db:
+    image: mysql
+    profiles: ["dev"]
+
+  phpmyadmin:
+    image: phpmyadmin
+    profiles: ["debug"]
+    depends_on:
+      - db
+
# will only start "web"
+$ docker-compose up -d
+
+# this will start mock-backend (and - if necessary - db)
+# by implicitly enabling profile `dev`
+$ docker-compose up -d mock-backend
+
+# this will fail because profile "dev" is disabled
+$ docker-compose up phpmyadmin
+

Although targeting phpmyadmin will automatically enable its profiles - i.e. debug - it will not automatically enable the profile(s) required by db - i.e. dev. To fix this you either have to add the debug profile to the db service:

db:
+  image: mysql
+  profiles: ["debug", "dev"]
+

or enable a profile of db explicitly:

# profile "debug" is enabled automatically by targeting phpmyadmin
+$ docker-compose --profile dev up phpmyadmin
+$ COMPOSE_PROFILES=dev docker-compose up phpmyadmin
+

Compose documentation

+

diff --git a/devdocs/docker/compose%2Freference%2Fconfig%2Findex.html b/devdocs/docker/compose%2Freference%2Fconfig%2Findex.html
new file mode 100644
index 00000000..23616cc4
--- /dev/null
+++ b/devdocs/docker/compose%2Freference%2Fconfig%2Findex.html
@@ -0,0 +1,21 @@

docker-compose config

+
Usage: docker-compose config [options]
+
+Options:
+    --resolve-image-digests  Pin image tags to digests.
+    --no-interpolate         Don't interpolate environment variables.
+    -q, --quiet              Only validate the configuration, don't print
+                             anything.
+    --services               Print the service names, one per line.
+    --volumes                Print the volume names, one per line.
+    --hash="*"               Print the service config hash, one per line.
+                             Set "service1,service2" for a list of specified services
+                             or use the wildcard symbol to display all services.
+

Validate and view the Compose file.
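
A couple of typical invocations based on the options above:

$ docker-compose config --quiet      # validate the file, printing nothing on success
$ docker-compose config --services   # print the service names, one per line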

+

diff --git a/devdocs/docker/compose%2Freference%2Fenvvars%2Findex.html b/devdocs/docker/compose%2Freference%2Fenvvars%2Findex.html
new file mode 100644
index 00000000..2dc8898e
--- /dev/null
+++ b/devdocs/docker/compose%2Freference%2Fenvvars%2Findex.html
@@ -0,0 +1,10 @@

Compose CLI environment variables

+ +

Several environment variables are available for you to configure the Docker Compose command-line behaviour.

Variables starting with DOCKER_ are the same as those used to configure the Docker command-line client. If you’re using docker-machine, then the eval "$(docker-machine env my-docker-vm)" command should set them to their correct values. (In this example, my-docker-vm is the name of a machine you created.)

Note: Some of these variables can also be provided using an environment file.

COMPOSE_PROJECT_NAME

Sets the project name. This value is prepended, along with the service name, to the container name on startup. For example, if your project name is myapp and it includes two services db and web, then Compose starts containers named myapp-db-1 and myapp-web-1 respectively.

Setting this is optional. If you do not set this, the COMPOSE_PROJECT_NAME defaults to the basename of the project directory. See also the -p command-line option.
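
For example, assuming a project name of myapp:

$ COMPOSE_PROJECT_NAME=myapp docker-compose up -d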

COMPOSE_FILE

Specify the path to a Compose file. If not provided, Compose looks for a file named docker-compose.yml in the current directory and then each parent directory in succession until a file by that name is found.

This variable supports multiple Compose files separated by a path separator (on Linux and macOS the path separator is :, on Windows it is ;). For example: COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml. The path separator can also be customized using COMPOSE_PATH_SEPARATOR.
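
A quick sketch of setting it for a shell session (the file names are illustrative):

$ export COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml
$ docker-compose up -d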

See also the -f command-line option.

COMPOSE_PROFILES

Specify one or multiple active profiles to enable. Calling docker-compose up with COMPOSE_PROFILES=frontend will start the services with the profile frontend as well as services without a specified profile.

You can specify a list of profiles separated with a comma: COMPOSE_PROFILES=frontend,debug will enable the profiles frontend and debug.

See also Using profiles with Compose and the --profile command-line option.

COMPOSE_API_VERSION

The Docker API only supports requests from clients which report a specific version. If you receive a client and server don't have same version error using docker-compose, you can work around this error by setting this environment variable. Set the version value to match the server version.

Setting this variable is intended as a workaround for situations where you need to run temporarily with a mismatch between the client and server version. For example, if you can upgrade the client but need to wait to upgrade the server.

Running with this variable set and a known mismatch does prevent some Docker features from working properly. The exact features that fail would depend on the Docker client and server versions. For this reason, running with this variable set is only intended as a workaround and it is not officially supported.

If you run into problems running with this set, resolve the mismatch through upgrade and remove this setting to see if your problems resolve before notifying support.

DOCKER_HOST

Sets the URL of the docker daemon. As with the Docker client, defaults to unix:///var/run/docker.sock.

DOCKER_TLS_VERIFY

When set to anything other than an empty string, enables TLS communication with the docker daemon.

DOCKER_CERT_PATH

Configures the path to the ca.pem, cert.pem, and key.pem files used for TLS verification. Defaults to ~/.docker.

COMPOSE_HTTP_TIMEOUT

Configures the time (in seconds) a request to the Docker daemon is allowed to hang before Compose considers it failed. Defaults to 60 seconds.

COMPOSE_TLS_VERSION

Configure which TLS version is used for TLS communication with the docker daemon. Defaults to TLSv1. Supported values are: TLSv1, TLSv1_1, TLSv1_2.

COMPOSE_CONVERT_WINDOWS_PATHS

Enable path conversion from Windows-style to Unix-style in volume definitions. Users of Docker Machine on Windows should always set this. Defaults to 0. Supported values: true or 1 to enable, false or 0 to disable.

COMPOSE_PATH_SEPARATOR

If set, the value of the COMPOSE_FILE environment variable is separated using this character as path separator.

COMPOSE_FORCE_WINDOWS_HOST

If set, volume declarations using the short syntax are parsed assuming the host path is a Windows path, even if Compose is running on a UNIX-based system. Supported values: true or 1 to enable, false or 0 to disable.

COMPOSE_IGNORE_ORPHANS

If set, Compose doesn’t try to detect orphaned containers for the project. Supported values: true or 1 to enable, false or 0 to disable.

COMPOSE_PARALLEL_LIMIT

Sets a limit for the number of operations Compose can execute in parallel. The default value is 64, and may not be set lower than 2.

COMPOSE_INTERACTIVE_NO_CLI

If set, Compose doesn’t attempt to use the Docker CLI for interactive run and exec operations. This option is not available on Windows, where the CLI is required for the aforementioned operations. Supported values: true or 1 to enable, false or 0 to disable.

COMPOSE_DOCKER_CLI_BUILD

Configure whether to use the Compose python client for building images or the native docker cli. By default, Compose uses the docker CLI to perform builds, which allows you to use BuildKit to perform builds.

Set COMPOSE_DOCKER_CLI_BUILD=0 to disable native builds, and to use the built-in python client.
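
For example, to force a single build through the built-in python client:

$ COMPOSE_DOCKER_CLI_BUILD=0 docker-compose build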

+

diff --git a/devdocs/docker/compose%2Freference%2Findex.html b/devdocs/docker/compose%2Freference%2Findex.html
new file mode 100644
index 00000000..54605d8a
--- /dev/null
+++ b/devdocs/docker/compose%2Freference%2Findex.html
@@ -0,0 +1,96 @@

Overview of docker-compose CLI

+ +

This page provides the usage information for the docker-compose command.

Command options overview and help

You can also see this information by running docker-compose --help from the command line.

Define and run multi-container applications with Docker.
+
+Usage:
+  docker-compose [-f <arg>...] [--profile <name>...] [options] [COMMAND] [ARGS...]
+  docker-compose -h|--help
+
+Options:
+  -f, --file FILE             Specify an alternate compose file
+                              (default: docker-compose.yml)
+  -p, --project-name NAME     Specify an alternate project name
+                              (default: directory name)
+  --profile NAME              Specify a profile to enable
+  --verbose                   Show more output
+  --log-level LEVEL           DEPRECATED and not working from 2.0 - Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+  --no-ansi                   Do not print ANSI control characters
+  -v, --version               Print version and exit
+  -H, --host HOST             Daemon socket to connect to
+
+  --tls                       Use TLS; implied by --tlsverify
+  --tlscacert CA_PATH         Trust certs signed only by this CA
+  --tlscert CLIENT_CERT_PATH  Path to TLS certificate file
+  --tlskey TLS_KEY_PATH       Path to TLS key file
+  --tlsverify                 Use TLS and verify the remote
+  --skip-hostname-check       Don't check the daemon's hostname against the
+                              name specified in the client certificate
+  --project-directory PATH    Specify an alternate working directory
+                              (default: the path of the Compose file)
+  --compatibility             If set, Compose will attempt to convert deploy
+                              keys in v3 files to their non-Swarm equivalent
+
+Commands:
+  build              Build or rebuild services
+  bundle             Generate a Docker bundle from the Compose file
+  config             Validate and view the Compose file
+  create             Create services
+  down               Stop and remove containers, networks, images, and volumes
+  events             Receive real time events from containers
+  exec               Execute a command in a running container
+  help               Get help on a command
+  images             List images
+  kill               Kill containers
+  logs               View output from containers
+  pause              Pause services
+  port               Print the public port for a port binding
+  ps                 List containers
+  pull               Pull service images
+  push               Push service images
+  restart            Restart services
+  rm                 Remove stopped containers
+  run                Run a one-off command
+  scale              Set number of containers for a service
+  start              Start services
+  stop               Stop services
+  top                Display the running processes
+  unpause            Unpause services
+  up                 Create and start containers
+  version            Show the Docker-Compose version information
+

You can use the Docker Compose binary, docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...], to build and manage multiple services in Docker containers.

Use -f to specify name and path of one or more Compose files

Use the -f flag to specify the location of a Compose configuration file.

Specifying multiple Compose files

You can supply multiple -f configuration files. When you supply multiple files, Compose combines them into a single configuration. Compose builds the configuration in the order you supply the files. Subsequent files override and add to their predecessors.

For example, consider this command line:

$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
+

The docker-compose.yml file might specify a webapp service.

webapp:
+  image: examples/web
+  ports:
+    - "8000:8000"
+  volumes:
+    - "/data"
+

If the docker-compose.admin.yml also specifies this same service, any matching fields override the previous file, and new values add to the webapp service configuration.

webapp:
+  build: .
+  environment:
+    - DEBUG=1
+

When you use multiple Compose files, all paths in the files are relative to the first configuration file specified with -f. You can use the --project-directory option to override this base path.

Use -f with - (dash) as the filename to read the configuration from stdin. When stdin is used, all paths in the configuration are relative to the current working directory.
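
A quick sketch of reading the configuration from stdin (the file name is illustrative):

$ cat docker-compose.yml | docker-compose -f - up -d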

The -f flag is optional. If you don’t provide this flag on the command line, Compose traverses the working directory and its parent directories looking for a docker-compose.yml and a docker-compose.override.yml file. You must supply at least the docker-compose.yml file. If both files are present on the same directory level, Compose combines the two files into a single configuration.

The configuration in the docker-compose.override.yml file is applied over and in addition to the values in the docker-compose.yml file.

Specifying a path to a single Compose file

You can use the -f flag to specify a path to a Compose file that is not located in the current directory, either from the command line or by setting up a COMPOSE_FILE environment variable in your shell or in an environment file.

For an example of using the -f option at the command line, suppose you are running the Compose Rails sample, and have a docker-compose.yml file in a directory called sandbox/rails. You can use a command like docker-compose pull to get the postgres image for the db service from anywhere by using the -f flag as follows: docker-compose -f ~/sandbox/rails/docker-compose.yml pull db

Here’s the full example:

$ docker-compose -f ~/sandbox/rails/docker-compose.yml pull db
+Pulling db (postgres:latest)...
+latest: Pulling from library/postgres
+ef0380f84d05: Pull complete
+50cf91dc1db8: Pull complete
+d3add4cd115c: Pull complete
+467830d8a616: Pull complete
+089b9db7dc57: Pull complete
+6fba0a36935c: Pull complete
+81ef0e73c953: Pull complete
+338a6c4894dc: Pull complete
+15853f32f67c: Pull complete
+044c83d92898: Pull complete
+17301519f133: Pull complete
+dcca70822752: Pull complete
+cecf11b8ccf3: Pull complete
+Digest: sha256:1364924c753d5ff7e2260cd34dc4ba05ebd40ee8193391220be0f9901d4e1651
+Status: Downloaded newer image for postgres:latest
+

Use -p to specify a project name

Each configuration has a project name. If you supply a -p flag, you can specify a project name. If you don’t specify the flag, Compose uses the current directory name. See also the COMPOSE_PROJECT_NAME environment variable.
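
For example (the project name is illustrative):

$ docker-compose -p my_project up -d
$ docker-compose -p my_project ps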

Use --profile to specify one or more active profiles

Calling docker-compose --profile frontend up will start the services with the profile frontend as well as services without a specified profile. You can also enable multiple profiles; for example, with docker-compose --profile frontend --profile debug up, the profiles frontend and debug are enabled.

See also Using profiles with Compose and the COMPOSE_PROFILES environment variable.

Set up environment variables

You can set environment variables for various docker-compose options, including the -f and -p flags.

For example, the COMPOSE_FILE environment variable relates to the -f flag, and COMPOSE_PROJECT_NAME environment variable relates to the -p flag.

Also, you can set some of these variables in an environment file.

Where to go next

+

diff --git a/devdocs/docker/compose%2Freference%2Fpull%2Findex.html b/devdocs/docker/compose%2Freference%2Fpull%2Findex.html
new file mode 100644
index 00000000..4ca25246
--- /dev/null
+++ b/devdocs/docker/compose%2Freference%2Fpull%2Findex.html
@@ -0,0 +1,48 @@

docker-compose pull

+
Usage: docker-compose pull [options] [SERVICE...]
+
+Options:
+    --ignore-pull-failures  Pull what it can and ignores images with pull failures.
+    --parallel              Deprecated, pull multiple images in parallel (enabled by default).
+    --no-parallel           Disable parallel pulling.
+    -q, --quiet             Pull without printing progress information
+    --include-deps          Also pull services declared as dependencies
+

Pulls an image associated with a service defined in a docker-compose.yml or docker-stack.yml file, but does not start containers based on those images.

For example, suppose you have this docker-compose.yml file from the Quickstart: Compose and Rails sample.

version: '2'
+services:
+  db:
+    image: postgres
+  web:
+    build: .
+    command: bundle exec rails s -p 3000 -b '0.0.0.0'
+    volumes:
+      - .:/myapp
+    ports:
+      - "3000:3000"
+    depends_on:
+      - db
+

If you run docker-compose pull ServiceName in the same directory as the docker-compose.yml file that defines the service, Docker pulls the associated image. For example, to pull the postgres image configured as the db service in our example, you would run docker-compose pull db.

$ docker-compose pull db
+Pulling db (postgres:latest)...
+latest: Pulling from library/postgres
+cd0a524342ef: Pull complete
+9c784d04dcb0: Pull complete
+d99dddf7e662: Pull complete
+e5bff71e3ce6: Pull complete
+cb3e0a865488: Pull complete
+31295d654cd5: Pull complete
+fc930a4e09f5: Pull complete
+8650cce8ef01: Pull complete
+61949acd8e52: Pull complete
+527a203588c0: Pull complete
+26dec14ac775: Pull complete
+0efc0ed5a9e5: Pull complete
+40cd26695b38: Pull complete
+Digest: sha256:fd6c0e2a9d053bebb294bb13765b3e01be7817bf77b01d58c2377ff27a4a46dc
+Status: Downloaded newer image for postgres:latest
+
+

diff --git a/devdocs/docker/compose%2Freference%2Fstop%2Findex.html b/devdocs/docker/compose%2Freference%2Fstop%2Findex.html
new file mode 100644
index 00000000..a95b0488
--- /dev/null
+++ b/devdocs/docker/compose%2Freference%2Fstop%2Findex.html
@@ -0,0 +1,14 @@

docker-compose stop

+
Usage: docker-compose stop [options] [SERVICE...]
+
+Options:
+  -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
+                             (default: 10)
+

Stops running containers without removing them. They can be started again with docker-compose start.
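
For example, to give a web service 30 seconds to shut down before it is killed (the service name is illustrative):

$ docker-compose stop -t 30 web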

+

diff --git a/devdocs/docker/compose%2Freference%2Fup%2Findex.html b/devdocs/docker/compose%2Freference%2Fup%2Findex.html
new file mode 100644
index 00000000..77a97939
--- /dev/null
+++ b/devdocs/docker/compose%2Freference%2Fup%2Findex.html
@@ -0,0 +1,43 @@

docker-compose up

+ +
Usage: docker-compose up [options] [--scale SERVICE=NUM...] [SERVICE...]
+
+Options:
+    -d, --detach               Detached mode: Run containers in the background,
+                               print new container names. Incompatible with
+                               --abort-on-container-exit.
+    --no-color                 Produce monochrome output.
+    --quiet-pull               Pull without printing progress information
+    --no-deps                  Don't start linked services.
+    --force-recreate           Recreate containers even if their configuration
+                               and image haven't changed.
+    --always-recreate-deps     Recreate dependent containers.
+                               Incompatible with --no-recreate.
+    --no-recreate              If containers already exist, don't recreate
+                               them. Incompatible with --force-recreate and 
+                               --renew-anon-volumes.
+    --no-build                 Don't build an image, even if it's missing.
+    --no-start                 Don't start the services after creating them.
+    --build                    Build images before starting containers.
+    --abort-on-container-exit  Stops all containers if any container was
+                               stopped. Incompatible with --detach.
+    --attach-dependencies      Attach to dependent containers.
+    -t, --timeout TIMEOUT      Use this timeout in seconds for container
+                               shutdown when attached or when containers are
+                               already running. (default: 10)
+    -V, --renew-anon-volumes   Recreate anonymous volumes instead of retrieving
+                               data from the previous containers.
+    --remove-orphans           Remove containers for services not defined
+                               in the Compose file.
+    --exit-code-from SERVICE   Return the exit code of the selected service
+                               container. Implies --abort-on-container-exit.
+    --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the
+                               `scale` setting in the Compose file if present.
+

Builds, (re)creates, starts, and attaches to containers for a service.

Unless they are already running, this command also starts any linked services.

The docker-compose up command aggregates the output of each container (essentially running docker-compose logs --follow). When the command exits, all containers are stopped. Running docker-compose up --detach starts the containers in the background and leaves them running.

If there are existing containers for a service, and the service’s configuration or image was changed after the container’s creation, docker-compose up picks up the changes by stopping and recreating the containers (preserving mounted volumes). To prevent Compose from picking up changes, use the --no-recreate flag.

If you want to force Compose to stop and recreate all containers, use the --force-recreate flag.
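
As an illustration (not part of the original page), a common combination of these flags runs the stack in the background, rebuilds images first, and recreates containers even if nothing has changed:

$ docker-compose up -d --build --force-recreate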

If the process encounters an error, the exit code for this command is 1.
If the process is interrupted using SIGINT (ctrl + C) or SIGTERM, the containers are stopped, and the exit code is 0.
If SIGINT or SIGTERM is sent again during this shutdown phase, the running containers are killed, and the exit code is 2.

+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/compose/reference/up/ +

+
diff --git a/devdocs/docker/compose%2Fsamples-for-compose%2Findex.html b/devdocs/docker/compose%2Fsamples-for-compose%2Findex.html new file mode 100644 index 00000000..1393d990 --- /dev/null +++ b/devdocs/docker/compose%2Fsamples-for-compose%2Findex.html @@ -0,0 +1,11 @@ +

Sample apps with Compose

+

The following samples show the various aspects of how to work with Docker Compose. As a prerequisite, be sure to install Docker Compose if you have not already done so.

Key concepts these samples cover

The samples should help you to:

Samples tailored to demo Compose

These samples focus specifically on Docker Compose:

Awesome Compose samples

The Awesome Compose samples provide a starting point on how to integrate different frameworks and technologies using Docker Compose. All samples are available in the Awesome-compose GitHub repo.

+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/compose/samples-for-compose/ +

+
diff --git a/devdocs/docker/compose%2Fstartup-order%2Findex.html b/devdocs/docker/compose%2Fstartup-order%2Findex.html new file mode 100644 index 00000000..8d747220 --- /dev/null +++ b/devdocs/docker/compose%2Fstartup-order%2Findex.html @@ -0,0 +1,37 @@ +

Control startup and shutdown order in Compose

+ +

You can control the order of service startup and shutdown with the depends_on option. Compose always starts and stops containers in dependency order, where dependencies are determined by depends_on, links, volumes_from, and network_mode: "service:...".

However, on startup, Compose does not wait until a container is “ready” (whatever that means for your particular application), only until it’s running. There’s a good reason for this.

The problem of waiting for a database (for example) to be ready is really just a subset of a much larger problem of distributed systems. In production, your database could become unavailable or move hosts at any time. Your application needs to be resilient to these types of failures.

To handle this, design your application to attempt to re-establish a connection to the database after a failure. If the application retries the connection, it can eventually connect to the database.

The best solution is to perform this check in your application code, both at startup and whenever a connection is lost for any reason. However, if you don’t need this level of resilience, you can work around the problem with a wrapper script:
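
A minimal sketch of such a wrapper script, assuming a database service reachable at db:5432 and nc (netcat) available in the application image (both names are assumptions for illustration only):

#!/bin/sh
# wait-for-db.sh: block until the database accepts TCP connections, then run the given command
until nc -z db 5432; do
  echo "Waiting for db:5432..."
  sleep 1
done
exec "$@"

The service's command could then be wrapped as, for example, ./wait-for-db.sh python app.py (the command is a placeholder), keeping the retry logic outside the application code.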


+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/compose/startup-order/ +

+
diff --git a/devdocs/docker/engine%2Fapi%2Findex.html b/devdocs/docker/engine%2Fapi%2Findex.html new file mode 100644 index 00000000..724aeecf --- /dev/null +++ b/devdocs/docker/engine%2Fapi%2Findex.html @@ -0,0 +1,31 @@ +

Develop with Docker Engine API

+ +

Docker provides an API for interacting with the Docker daemon (called the Docker Engine API), as well as SDKs for Go and Python. The SDKs allow you to build and scale Docker apps and solutions quickly and easily. If Go or Python don’t work for you, you can use the Docker Engine API directly.

For information about Docker Engine SDKs, see Develop with Docker Engine SDKs.

The Docker Engine API is a RESTful API accessed by an HTTP client such as wget or curl, or the HTTP library which is part of most modern programming languages.

View the API reference

You can view the reference for the latest version of the API or choose a specific version.

Versioned API and SDK

The version of the Docker Engine API you should use depends upon the version of your Docker daemon and Docker client.

A given version of the Docker Engine SDK supports a specific version of the Docker Engine API, as well as all earlier versions. If breaking changes occur, they are documented prominently.

Daemon and client API mismatches

The Docker daemon and client do not necessarily need to be the same version at all times. However, keep the following in mind.

A new version of the API is released when new features are added. The Docker API is backward-compatible, so you do not need to update code that uses the API unless you need to take advantage of new features.

To see the highest version of the API your Docker daemon and client support, use docker version:

$ docker version
+
+Client: Docker Engine - Community
+ Version:           20.10.0
+ API version:       1.41
+ Go version:        go1.13.15
+ Git commit:        7287ab3
+ Built:             Tue Dec  8 19:00:39 2020
+ OS/Arch:           linux/amd64
+ Context:           default
+ Experimental:      true
+Server: Docker Engine - Community
+ Engine:
+  Version:          20.10.0
+  API version:      1.41 (minimum version 1.12)
+  Go version:       go1.13.15
+  Git commit:       eeddea2
+  Built:            Tue Dec  8 18:58:12 2020
+  OS/Arch:          linux/amd64
+  ...
+

You can specify the API version to use in one of several ways.
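
For example, the DOCKER_API_VERSION environment variable pins the API version the docker CLI uses, which is handy for checking compatibility against an older daemon:

$ DOCKER_API_VERSION=1.39 docker version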

API version matrix

Docker version Maximum API version Change log
20.10 1.41 changes
19.03 1.40 changes
18.09 1.39 changes
18.06 1.38 changes
18.05 1.37 changes
18.04 1.37 changes
18.03 1.37 changes
18.02 1.36 changes
17.12 1.35 changes
17.11 1.34 changes
17.10 1.33 changes
17.09 1.32 changes
17.07 1.31 changes
17.06 1.30 changes
17.05 1.29 changes
17.04 1.28 changes
17.03.1 1.27 changes
17.03 1.26 changes
1.13.1 1.26 changes
1.13 1.25 changes
1.12 1.24 changes
1.11 1.23 changes
1.10 1.22 changes
1.9 1.21 changes
1.8 1.20 changes
1.7 1.19 changes
1.6 1.18 changes

Archived API versions

Documentation for older versions of the API has been archived, but can be found in the docker code repository on GitHub.

+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/api/ +

+
diff --git a/devdocs/docker/engine%2Fapi%2Fsdk%2Fexamples%2Findex.html b/devdocs/docker/engine%2Fapi%2Fsdk%2Fexamples%2Findex.html new file mode 100644 index 00000000..05d1efd8 --- /dev/null +++ b/devdocs/docker/engine%2Fapi%2Fsdk%2Fexamples%2Findex.html @@ -0,0 +1,435 @@ +

Examples using the Docker Engine SDKs and Docker API

+ +

After you install Docker, you can install the Go or Python SDK and also try out the Docker Engine API.

Each of these examples shows how to perform a given Docker operation using the Go and Python SDKs and the HTTP API using curl.

Run a container

This first example shows how to run a container using the Docker API. On the command line, you would use the docker run command, but this is just as easy to do from your own apps too.

This is the equivalent of typing docker run alpine echo hello world at the command prompt:

package main
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/pkg/stdcopy"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	reader, err := cli.ImagePull(ctx, "docker.io/library/alpine", types.ImagePullOptions{})
+	if err != nil {
+		panic(err)
+	}
+
+	defer reader.Close()
+	io.Copy(os.Stdout, reader)
+
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image: "alpine",
+		Cmd:   []string{"echo", "hello world"},
+		Tty:   false,
+	}, nil, nil, nil, "")
+	if err != nil {
+		panic(err)
+	}
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		panic(err)
+	}
+
+	statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
+	select {
+	case err := <-errCh:
+		if err != nil {
+			panic(err)
+		}
+	case <-statusCh:
+	}
+
+	out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true})
+	if err != nil {
+		panic(err)
+	}
+
+	stdcopy.StdCopy(os.Stdout, os.Stderr, out)
+}
+
import docker
+client = docker.from_env()
+print(client.containers.run("alpine", ["echo", "hello", "world"]))
+
$ curl --unix-socket /var/run/docker.sock -H "Content-Type: application/json" \
+  -d '{"Image": "alpine", "Cmd": ["echo", "hello world"]}' \
+  -X POST http://localhost/v1.41/containers/create
+{"Id":"1c6594faf5","Warnings":null}
+
+$ curl --unix-socket /var/run/docker.sock -X POST http://localhost/v1.41/containers/1c6594faf5/start
+
+$ curl --unix-socket /var/run/docker.sock -X POST http://localhost/v1.41/containers/1c6594faf5/wait
+{"StatusCode":0}
+
+$ curl --unix-socket /var/run/docker.sock "http://localhost/v1.41/containers/1c6594faf5/logs?stdout=1"
+hello world
+

When using cURL to connect over a unix socket, the hostname is not important. The examples above use localhost, but any hostname would work.
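
For instance, the ping endpoint answers the same way whichever hostname appears in the URL; docker-daemon below is an arbitrary placeholder:

$ curl --unix-socket /var/run/docker.sock http://docker-daemon/v1.41/_ping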

Using cURL 7.47.0 or below?

The examples above assume you are using cURL 7.50.0 or above. Older versions of cURL used a non-standard URL notation when using a socket connection.

If you are using an older version of cURL, use http:/<API version>/ instead, for example, http:/v1.41/containers/1c6594faf5/start

Run a container in the background

You can also run containers in the background, the equivalent of typing docker run -d bfirsh/reticulate-splines:

package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	imageName := "bfirsh/reticulate-splines"
+
+	out, err := cli.ImagePull(ctx, imageName, types.ImagePullOptions{})
+	if err != nil {
+		panic(err)
+	}
+	defer out.Close()
+	io.Copy(os.Stdout, out)
+
+	resp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image: imageName,
+	}, nil, nil, nil, "")
+	if err != nil {
+		panic(err)
+	}
+
+	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+		panic(err)
+	}
+
+	fmt.Println(resp.ID)
+}
+
import docker
+client = docker.from_env()
+container = client.containers.run("bfirsh/reticulate-splines", detach=True)
+print(container.id)
+
$ curl --unix-socket /var/run/docker.sock -H "Content-Type: application/json" \
+  -d '{"Image": "bfirsh/reticulate-splines"}' \
+  -X POST http://localhost/v1.41/containers/create
+{"Id":"1c6594faf5","Warnings":null}
+
+$ curl --unix-socket /var/run/docker.sock -X POST http://localhost/v1.41/containers/1c6594faf5/start
+

List and manage containers

You can use the API to list containers that are running, just like using docker ps:

package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{})
+	if err != nil {
+		panic(err)
+	}
+
+	for _, container := range containers {
+		fmt.Println(container.ID)
+	}
+}
+
import docker
+client = docker.from_env()
+for container in client.containers.list():
+  print(container.id)
+
$ curl --unix-socket /var/run/docker.sock http://localhost/v1.41/containers/json
+[{
+  "Id":"ae63e8b89a26f01f6b4b2c9a7817c31a1b6196acf560f66586fbc8809ffcd772",
+  "Names":["/tender_wing"],
+  "Image":"bfirsh/reticulate-splines",
+  ...
+}]
+

Stop all running containers

Now that you know what containers exist, you can perform operations on them. This example stops all running containers.

Note: Don’t run this on a production server. Also, if you are using swarm services, the containers stop, but Docker creates new ones to keep the service running in its configured state.

package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{})
+	if err != nil {
+		panic(err)
+	}
+
+	for _, container := range containers {
+		fmt.Print("Stopping container ", container.ID[:10], "... ")
+		if err := cli.ContainerStop(ctx, container.ID, nil); err != nil {
+			panic(err)
+		}
+		fmt.Println("Success")
+	}
+}
+
import docker
+client = docker.from_env()
+for container in client.containers.list():
+  container.stop()
+
$ curl --unix-socket /var/run/docker.sock http://localhost/v1.41/containers/json
+[{
+  "Id":"ae63e8b89a26f01f6b4b2c9a7817c31a1b6196acf560f66586fbc8809ffcd772",
+  "Names":["/tender_wing"],
+  "Image":"bfirsh/reticulate-splines",
+  ...
+}]
+
+$ curl --unix-socket /var/run/docker.sock \
+  -X POST http://localhost/v1.41/containers/ae63e8b89a26/stop
+

You can also perform actions on individual containers. This example prints the logs of a container given its ID. Before running the code, change the hard-coded container ID to the ID of a container that exists on your system.

package main
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	options := types.ContainerLogsOptions{ShowStdout: true}
+	// Replace this ID with a container that really exists
+	out, err := cli.ContainerLogs(ctx, "f1064a8a4c82", options)
+	if err != nil {
+		panic(err)
+	}
+
+	io.Copy(os.Stdout, out)
+}
+
import docker
+client = docker.from_env()
+container = client.containers.get('f1064a8a4c82')
+print(container.logs())
+
$ curl --unix-socket /var/run/docker.sock "http://localhost/v1.41/containers/ca5f55cdb/logs?stdout=1"
+Reticulating spline 1...
+Reticulating spline 2...
+Reticulating spline 3...
+Reticulating spline 4...
+Reticulating spline 5...
+

List all images

List the images on your Engine, similar to docker image ls:

package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	images, err := cli.ImageList(ctx, types.ImageListOptions{})
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range images {
+		fmt.Println(image.ID)
+	}
+}
+
import docker
+client = docker.from_env()
+for image in client.images.list():
+  print(image.id)
+
$ curl --unix-socket /var/run/docker.sock http://localhost/v1.41/images/json
+[{
+  "Id":"sha256:31d9a31e1dd803470c5a151b8919ef1988ac3efd44281ac59d43ad623f275dcd",
+  "ParentId":"sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96",
+  ...
+}]
+

Pull an image

Pull an image, like docker pull:

package main
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	out, err := cli.ImagePull(ctx, "alpine", types.ImagePullOptions{})
+	if err != nil {
+		panic(err)
+	}
+
+	defer out.Close()
+
+	io.Copy(os.Stdout, out)
+}
+
import docker
+client = docker.from_env()
+image = client.images.pull("alpine")
+print(image.id)
+
$ curl --unix-socket /var/run/docker.sock \
+  -X POST "http://localhost/v1.41/images/create?fromImage=alpine"
+{"status":"Pulling from library/alpine","id":"3.1"}
+{"status":"Pulling fs layer","progressDetail":{},"id":"8f13703509f7"}
+{"status":"Downloading","progressDetail":{"current":32768,"total":2244027},"progress":"[\u003e                                                  ] 32.77 kB/2.244 MB","id":"8f13703509f7"}
+...
+

Pull an image with authentication

Pull an image, like docker pull, with authentication:

Note: Credentials are sent in the clear. Docker’s official registries use HTTPS. Private registries should also be configured to use HTTPS.

package main
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	authConfig := types.AuthConfig{
+		Username: "username",
+		Password: "password",
+	}
+	encodedJSON, err := json.Marshal(authConfig)
+	if err != nil {
+		panic(err)
+	}
+	authStr := base64.URLEncoding.EncodeToString(encodedJSON)
+
+	out, err := cli.ImagePull(ctx, "alpine", types.ImagePullOptions{RegistryAuth: authStr})
+	if err != nil {
+		panic(err)
+	}
+
+	defer out.Close()
+	io.Copy(os.Stdout, out)
+}
+

The Python SDK retrieves authentication information from the credentials store file and integrates with credential helpers. It is possible to override these credentials, but that is out of scope for this Getting Started guide. After using docker login, the Python SDK uses these credentials automatically.

import docker
+client = docker.from_env()
+image = client.images.pull("alpine")
+print(image.id)
+

This example leaves the credentials in your shell’s history, so consider this a naive implementation. The credentials are passed as a Base-64-encoded JSON structure.

$ JSON=$(echo -n '{"username": "string", "password": "string", "serveraddress": "string"}' | base64 | tr -d '\n')
+
+$ curl --unix-socket /var/run/docker.sock \
+  -H "Content-Type: application/json" \
+  -H "X-Registry-Auth: $JSON" \
+  -X POST "http://localhost/v1.41/images/create?fromImage=alpine"
+{"status":"Pulling from library/alpine","id":"3.1"}
+{"status":"Pulling fs layer","progressDetail":{},"id":"8f13703509f7"}
+{"status":"Downloading","progressDetail":{"current":32768,"total":2244027},"progress":"[\u003e                                                  ] 32.77 kB/2.244 MB","id":"8f13703509f7"}
+...
+

Commit a container

Commit a container to create an image from its contents:

package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+)
+
+func main() {
+	ctx := context.Background()
+	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	if err != nil {
+		panic(err)
+	}
+
+	createResp, err := cli.ContainerCreate(ctx, &container.Config{
+		Image: "alpine",
+		Cmd:   []string{"touch", "/helloworld"},
+	}, nil, nil, nil, "")
+	if err != nil {
+		panic(err)
+	}
+
+	if err := cli.ContainerStart(ctx, createResp.ID, types.ContainerStartOptions{}); err != nil {
+		panic(err)
+	}
+
+	statusCh, errCh := cli.ContainerWait(ctx, createResp.ID, container.WaitConditionNotRunning)
+	select {
+	case err := <-errCh:
+		if err != nil {
+			panic(err)
+		}
+	case <-statusCh:
+	}
+
+	commitResp, err := cli.ContainerCommit(ctx, createResp.ID, types.ContainerCommitOptions{Reference: "helloworld"})
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(commitResp.ID)
+}
+
import docker
+client = docker.from_env()
+container = client.containers.run("alpine", ["touch", "/helloworld"], detach=True)
+container.wait()
+image = container.commit("helloworld")
+print(image.id)
+
$ docker run -d alpine touch /helloworld
+0888269a9d584f0fa8fc96b3c0d8d57969ceea3a64acf47cd34eebb4744dbc52
+$ curl --unix-socket /var/run/docker.sock\
+  -X POST "http://localhost/v1.41/commit?container=0888269a9d&repo=helloworld"
+{"Id":"sha256:6c86a5cd4b87f2771648ce619e319f3e508394b5bfc2cdbd2d60f59d52acda6c"}
+
+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/api/sdk/examples/ +

+
diff --git a/devdocs/docker/engine%2Fapi%2Fsdk%2Findex.html b/devdocs/docker/engine%2Fapi%2Fsdk%2Findex.html new file mode 100644 index 00000000..d3b76c78 --- /dev/null +++ b/devdocs/docker/engine%2Fapi%2Fsdk%2Findex.html @@ -0,0 +1,81 @@ +

Develop with Docker Engine SDKs

+ +

Docker provides an API for interacting with the Docker daemon (called the Docker Engine API), as well as SDKs for Go and Python. The SDKs allow you to build and scale Docker apps and solutions quickly and easily. If Go or Python don’t work for you, you can use the Docker Engine API directly.

The Docker Engine API is a RESTful API accessed by an HTTP client such as wget or curl, or the HTTP library which is part of most modern programming languages.

Install the SDKs

Use the following commands to install the Go or Python SDK. Both SDKs can be installed and coexist together.

Go SDK

$ go get github.com/docker/docker/client
+

The client requires a recent version of Go. Run go version and ensure that you are running a currently supported release of Go.

Read the full Docker Engine Go SDK reference.

Python SDK
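
The Python SDK is published on PyPI as the docker package and is typically installed with pip:

$ pip install docker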

Read the full Docker Engine Python SDK reference.

View the API reference

You can view the reference for the latest version of the API or choose a specific version.

Versioned API and SDK

The version of the Docker Engine API you should use depends upon the version of your Docker daemon and Docker client. Refer to the versioned API and SDK section in the API documentation for details.

SDK and API quickstart

Use the following guidelines to choose the SDK or API version to use in your code:

As an example, the docker run command can be easily implemented using the Docker API directly, or using the Python or Go SDK.

package main
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/pkg/stdcopy"
+)
+
+func main() {
+    ctx := context.Background()
+    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+    if err != nil {
+        panic(err)
+    }
+
+    reader, err := cli.ImagePull(ctx, "docker.io/library/alpine", types.ImagePullOptions{})
+    if err != nil {
+        panic(err)
+    }
+    io.Copy(os.Stdout, reader)
+
+    resp, err := cli.ContainerCreate(ctx, &container.Config{
+        Image: "alpine",
+        Cmd:   []string{"echo", "hello world"},
+    }, nil, nil, nil, "")
+    if err != nil {
+        panic(err)
+    }
+
+    if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+        panic(err)
+    }
+
+    statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
+    select {
+    case err := <-errCh:
+        if err != nil {
+            panic(err)
+        }
+    case <-statusCh:
+    }
+
+    out, err := cli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true})
+    if err != nil {
+        panic(err)
+    }
+
+    stdcopy.StdCopy(os.Stdout, os.Stderr, out)
+}
+
import docker
+client = docker.from_env()
+print(client.containers.run("alpine", ["echo", "hello", "world"]))
+
$ curl --unix-socket /var/run/docker.sock -H "Content-Type: application/json" \
+  -d '{"Image": "alpine", "Cmd": ["echo", "hello world"]}' \
+  -X POST http://localhost/v1.41/containers/create
+{"Id":"1c6594faf5","Warnings":null}
+
+$ curl --unix-socket /var/run/docker.sock -X POST http://localhost/v1.41/containers/1c6594faf5/start
+
+$ curl --unix-socket /var/run/docker.sock -X POST http://localhost/v1.41/containers/1c6594faf5/wait
+{"StatusCode":0}
+
+$ curl --unix-socket /var/run/docker.sock "http://localhost/v1.41/containers/1c6594faf5/logs?stdout=1"
+hello world
+

When using cURL to connect over a unix socket, the hostname is not important. The examples above use localhost, but any hostname would work.

Using cURL 7.47.0 or below?

The examples above assume you are using cURL 7.50.0 or above. Older versions of cURL used a non-standard URL notation when using a socket connection.

If you are using an older version of cURL, use http:/<API version>/ instead, for example, http:/v1.41/containers/1c6594faf5/start

For more examples, take a look at the SDK examples.

Unofficial libraries

There are a number of community supported libraries available for other languages. They have not been tested by Docker, so if you run into any issues, file them with the library maintainers.

Language Library
C libdocker
C# Docker.DotNet
C++ lasote/docker_client
Clojure clj-docker-client
Clojure contajners
Dart bwu_docker
Erlang erldocker
Gradle gradle-docker-plugin
Groovy docker-client
Haskell docker-hs
HTML (Web Components) docker-elements
Java docker-client
Java docker-java
Java docker-java-api
Java jocker
NodeJS dockerode
NodeJS harbor-master
Perl Eixo::Docker
PHP Docker-PHP
Ruby docker-api
Rust docker-rust
Rust shiplift
Scala tugboat
Scala reactive-docker
Swift docker-client-swift
+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/api/sdk/ +

+
diff --git a/devdocs/docker/engine%2Fcontext%2Fworking-with-contexts%2Findex.html b/devdocs/docker/engine%2Fcontext%2Fworking-with-contexts%2Findex.html new file mode 100644 index 00000000..9c5f0497 --- /dev/null +++ b/devdocs/docker/engine%2Fcontext%2Fworking-with-contexts%2Findex.html @@ -0,0 +1,101 @@ +

Docker Context

+ +

Introduction

This guide shows how contexts make it easy for a single Docker CLI to manage multiple Swarm clusters, multiple Kubernetes clusters, and multiple individual Docker nodes.

A single Docker CLI can have multiple contexts. Each context contains all of the endpoint and security information required to manage a different cluster or node. The docker context command makes it easy to configure these contexts and switch between them.

As an example, a single Docker client on your company laptop might be configured with two contexts; dev-k8s and prod-swarm. dev-k8s contains the endpoint data and security credentials to configure and manage a Kubernetes cluster in a development environment. prod-swarm contains everything required to manage a Swarm cluster in a production environment. Once these contexts are configured, you can use the top-level docker context use <context-name> to easily switch between them.

For information on using Docker Context to deploy your apps to the cloud, see Deploying Docker containers on Azure and Deploying Docker containers on ECS.

Prerequisites

To follow the examples in this guide, you’ll need:

Run docker context to verify that your Docker client supports contexts.

You will also need one of the following:

The anatomy of a context

A context is a combination of several properties. These include:

The easiest way to see what a context looks like is to view the default context.

$ docker context ls
+NAME          DESCRIPTION     DOCKER ENDPOINT                KUBERNETES ENDPOINT      ORCHESTRATOR
+default *     Current...      unix:///var/run/docker.sock                             swarm
+

This shows a single context called “default”. It’s configured to talk to a Swarm cluster through the local /var/run/docker.sock Unix socket. It has no Kubernetes endpoint configured.

The asterisk in the NAME column indicates that this is the active context. This means all docker commands will be executed against the “default” context unless overridden with environment variables such as DOCKER_HOST and DOCKER_CONTEXT, or on the command-line with the --context and --host flags.

Dig a bit deeper with docker context inspect. In this example, we’re inspecting the context called default.

$ docker context inspect default
+[
+    {
+        "Name": "default",
+        "Metadata": {
+            "StackOrchestrator": "swarm"
+        },
+        "Endpoints": {
+            "docker": {
+                "Host": "unix:///var/run/docker.sock",
+                "SkipTLSVerify": false
+            }
+        },
+        "TLSMaterial": {},
+        "Storage": {
+            "MetadataPath": "\u003cIN MEMORY\u003e",
+            "TLSPath": "\u003cIN MEMORY\u003e"
+        }
+    }
+]
+

This context is using “swarm” as the orchestrator (metadata.stackOrchestrator). It is configured to talk to an endpoint exposed on a local Unix socket at /var/run/docker.sock (Endpoints.docker.Host), and requires TLS verification (Endpoints.docker.SkipTLSVerify).

Create a new context

You can create new contexts with the docker context create command.

The following example creates a new context called “docker-test” and specifies the following:

$ docker context create docker-test \
+  --default-stack-orchestrator=swarm \
+  --docker host=unix:///var/run/docker.sock
+
+Successfully created context "docker-test"
+

The new context is stored in a meta.json file below ~/.docker/contexts/. Each new context you create gets its own meta.json stored in a dedicated sub-directory of ~/.docker/contexts/.

Note: The default context behaves differently than manually created contexts. It does not have a meta.json configuration file, and it dynamically updates based on the current configuration. For example, if you switch your current Kubernetes config using kubectl config use-context, the default Docker context will dynamically update itself to the new Kubernetes endpoint.

You can view the new context with docker context ls and docker context inspect <context-name>.

The following can be used to create a config with Kubernetes as the default orchestrator using the existing kubeconfig stored in /home/ubuntu/.kube/config. For this to work, you will need a valid kubeconfig file in /home/ubuntu/.kube/config. If your kubeconfig has more than one context, the current context (kubectl config current-context) will be used.

$ docker context create k8s-test \
+  --default-stack-orchestrator=kubernetes \
+  --kubernetes config-file=/home/ubuntu/.kube/config \
+  --docker host=unix:///var/run/docker.sock
+
+Successfully created context "k8s-test"
+

You can view all contexts on the system with docker context ls.

$ docker context ls
+NAME           DESCRIPTION   DOCKER ENDPOINT               KUBERNETES ENDPOINT               ORCHESTRATOR
+default *      Current       unix:///var/run/docker.sock   https://35.226.99.100 (default)   swarm
+k8s-test                     unix:///var/run/docker.sock   https://35.226.99.100 (default)   kubernetes
+docker-test                  unix:///var/run/docker.sock                                     swarm
+

The current context is indicated with an asterisk (“*”).

Use a different context

You can use docker context use to quickly switch between contexts.

The following command will switch the docker CLI to use the “k8s-test” context.

$ docker context use k8s-test
+
+k8s-test
+Current context is now "k8s-test"
+

Verify the operation by listing all contexts and ensuring the asterisk (“*”) is against the “k8s-test” context.

$ docker context ls
+NAME            DESCRIPTION                               DOCKER ENDPOINT               KUBERNETES ENDPOINT               ORCHESTRATOR
+default         Current DOCKER_HOST based configuration   unix:///var/run/docker.sock   https://35.226.99.100 (default)   swarm
+docker-test                                               unix:///var/run/docker.sock                                     swarm
+k8s-test *                                                unix:///var/run/docker.sock   https://35.226.99.100 (default)   kubernetes
+

docker commands will now target endpoints defined in the “k8s-test” context.

You can also set the current context using the DOCKER_CONTEXT environment variable. This overrides the context set with docker context use.

Use the appropriate command below to set the context to docker-test using an environment variable.

Windows PowerShell:

> $Env:DOCKER_CONTEXT=docker-test
+

Linux:

$ export DOCKER_CONTEXT=docker-test
+

Run a docker context ls to verify that the “docker-test” context is now the active context.

You can also use the global --context flag to override the context specified by the DOCKER_CONTEXT environment variable. For example, the following will send the command to a context called “production”.

$ docker --context production container ls
+

Exporting and importing Docker contexts

The docker context command makes it easy to export and import contexts on different machines with the Docker client installed.

You can use the docker context export command to export an existing context to a file. This file can later be imported on another machine that has the docker client installed.

By default, contexts will be exported as native Docker contexts. You can export and import these using the docker context command. If the context you are exporting includes a Kubernetes endpoint, the Kubernetes part of the context will be included in the export and import operations.

There is also an option to export just the Kubernetes part of a context. This will produce a native kubeconfig file that can be manually merged with an existing ~/.kube/config file on another host that has kubectl installed. You cannot export just the Kubernetes portion of a context and then import it with docker context import. The only way to import the exported Kubernetes config is to manually merge it into an existing kubeconfig file.

Let’s look at exporting and importing a native Docker context.

Exporting and importing a native Docker context

The following example exports an existing context called “docker-test”. It will be written to a file called docker-test.dockercontext.

$ docker context export docker-test
+Written file "docker-test.dockercontext"
+

Check the contents of the export file.

$ cat docker-test.dockercontext
+meta.json0000644000000000000000000000022300000000000011023 0ustar0000000000000000{"Name":"docker-test","Metadata":{"StackOrchestrator":"swarm"},"Endpoints":{"docker":{"Host":"unix:///var/run/docker.sock","SkipTLSVerify":false}}}tls0000700000000000000000000000000000000000000007716 5ustar0000000000000000
+

This file can be imported on another host using docker context import. The target host must have the Docker client installed.

$ docker context import docker-test docker-test.dockercontext
+docker-test
+Successfully imported context "docker-test"
+

You can verify that the context was imported with docker context ls.

The format of the import command is docker context import <context-name> <context-file>.

Now, let’s look at exporting just the Kubernetes parts of a context.

Exporting a Kubernetes context

You can export a Kubernetes context only if the context you are exporting has a Kubernetes endpoint configured. You cannot import a Kubernetes context using docker context import.

These steps will use the --kubeconfig flag to export only the Kubernetes elements of the existing k8s-test context to a file called “k8s-test.kubeconfig”. The cat command will then show that it’s exported as a valid kubeconfig file.

$ docker context export k8s-test --kubeconfig
+Written file "k8s-test.kubeconfig"
+

Verify that the exported file contains a valid kubectl config.

$ cat k8s-test.kubeconfig
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data:
+    <Snip>
+    server: https://35.226.99.100
+  name: cluster
+contexts:
+- context:
+    cluster: cluster
+    namespace: default
+    user: authInfo
+  name: context
+current-context: context
+kind: Config
+preferences: {}
+users:
+- name: authInfo
+  user:
+    auth-provider:
+      config:
+        cmd-args: config config-helper --format=json
+        cmd-path: /snap/google-cloud-sdk/77/bin/gcloud
+        expiry-key: '{.credential.token_expiry}'
+        token-key: '{.credential.access_token}'
+      name: gcp
+

You can merge this with an existing ~/.kube/config file on another machine.

Updating a context

You can use docker context update to update fields in an existing context.

The following example updates the “Description” field in the existing k8s-test context.

$ docker context update k8s-test --description "Test Kubernetes cluster"
+k8s-test
+Successfully updated context "k8s-test"
+
+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/context/working-with-contexts/ +

+
diff --git a/devdocs/docker/engine%2Fdeprecated%2Findex.html b/devdocs/docker/engine%2Fdeprecated%2Findex.html new file mode 100644 index 00000000..559fb679 --- /dev/null +++ b/devdocs/docker/engine%2Fdeprecated%2Findex.html @@ -0,0 +1,82 @@ +

Deprecated Engine Features

This page provides an overview of features that are deprecated in Engine. Changes in packaging, and supported (Linux) distributions are not included. To learn about end of support for Linux distributions, refer to the release notes.

Feature Deprecation Policy

As changes are made to Docker there may be times when existing features need to be removed or replaced with newer features. Before an existing feature is removed it is labeled as “deprecated” within the documentation and remains in Docker for at least one stable release unless specified explicitly otherwise. After that time it may be removed.

Users are expected to take note of the list of deprecated features each release and plan their migration away from those features, and (if applicable) towards the replacement features as soon as possible.

Deprecated Engine Features

The table below provides an overview of the current status of deprecated features:

Status Feature Deprecated Remove
Deprecated Support for encrypted TLS private keys v20.10 -
Deprecated Kubernetes stack and context support v20.10 -
Deprecated Pulling images from non-compliant image registries v20.10 -
Deprecated Linux containers on Windows (LCOW) v20.10 -
Deprecated BLKIO weight options with cgroups v1 v20.10 -
Deprecated Kernel memory limit v20.10 -
Deprecated Classic Swarm and overlay networks using external key/value stores v20.10 -
Deprecated Support for the legacy ~/.dockercfg configuration file for authentication v20.10 -
Deprecated CLI plugins support v20.10 -
Deprecated Dockerfile legacy ENV name value syntax v20.10 -
Removed docker build --stream flag (experimental) v20.10 v20.10
Deprecated fluentd-async-connect log opt v20.10 -
Deprecated Configuration options for experimental CLI features v19.03 v20.10
Deprecated Pushing and pulling with image manifest v2 schema 1 v19.03 v20.10
Removed docker engine subcommands v19.03 v20.10
Removed Top-level docker deploy subcommand (experimental) v19.03 v20.10
Removed docker stack deploy using “dab” files (experimental) v19.03 v20.10
Deprecated AuFS storage driver v19.03 -
Deprecated Legacy “overlay” storage driver v18.09 -
Deprecated Device mapper storage driver v18.09 -
Removed Use of reserved namespaces in engine labels v18.06 v20.10
Removed --disable-legacy-registry override daemon option v17.12 v19.03
Removed Interacting with V1 registries v17.06 v17.12
Removed Asynchronous service create and service update as default v17.05 v17.10
Removed -g and --graph flags on dockerd v17.05 -
Deprecated Top-level network properties in NetworkSettings v1.13 v17.12
Removed filter param for /images/json endpoint v1.13 v20.10
Removed repository:shortid image references v1.13 v17.12
Removed docker daemon subcommand v1.13 v17.12
Removed Duplicate keys with conflicting values in engine labels v1.13 v17.12
Deprecated MAINTAINER in Dockerfile v1.13 -
Deprecated API calls without a version v1.13 v17.12
Removed Backing filesystem without d_type support for overlay/overlay2 v1.13 v17.12
Removed --automated and --stars flags on docker search v1.12 v20.10
Deprecated -h shorthand for --help v1.12 v17.09
Removed -e and --email flags on docker login v1.11 v17.06
Deprecated Separator (:) of --security-opt flag on docker run v1.11 v17.06
Deprecated Ambiguous event fields in API v1.10 -
Removed -f flag on docker tag v1.10 v1.12
Removed HostConfig at API container start v1.10 v1.12
Removed --before and --since flags on docker ps v1.10 v1.12
Removed Driver-specific log tags v1.9 v1.12
Removed Docker Content Trust ENV passphrase variables name change v1.9 v1.12
Removed /containers/(id or name)/copy endpoint v1.8 v1.12
Removed LXC built-in exec driver v1.8 v1.10
Removed Old Command Line Options v1.8 v1.10
Removed --api-enable-cors flag on dockerd v1.6 v17.09
Removed --run flag on docker commit v0.10 v1.13
Removed Three arguments form in docker import v0.6.7 v1.12

Support for encrypted TLS private keys

Deprecated in Release: v20.10

Use of encrypted TLS private keys has been deprecated, and will be removed in a future release. Golang has deprecated support for legacy PEM encryption (as specified in RFC 1423), as it is insecure by design (see https://go-review.googlesource.com/c/go/+/264159).
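
If you still rely on an encrypted key for daemon or client TLS, one possible migration path (an illustration, not taken from this page; the file names are placeholders) is to decrypt the key with OpenSSL and protect the plaintext file with filesystem permissions instead:

$ openssl rsa -in key.pem -out key-decrypted.pem
$ chmod 400 key-decrypted.pem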

Kubernetes stack and context support

Deprecated in Release: v20.10

Following the deprecation of Compose on Kubernetes, support for Kubernetes in the stack and context commands in the docker CLI is now marked as deprecated as well.

Pulling images from non-compliant image registries

Deprecated in Release: v20.10

Docker Engine v20.10 and up includes optimizations to verify if images in the local image cache need updating before pulling, preventing the Docker Engine from making unnecessary API requests. These optimizations require the container image registry to conform to the Open Container Initiative Distribution Specification.

While most registries conform to the specification, we have encountered some registries that are non-compliant, which causes docker pull to fail.

As a temporary solution, Docker Engine v20.10 includes a fallback mechanism to allow docker pull to be functional when using a non-compliant registry. A warning message is printed in this situation:

WARNING Failed to pull manifest by the resolved digest. This registry does not
+        appear to conform to the distribution registry specification; falling back to
+        pull by tag. This fallback is DEPRECATED, and will be removed in a future
+        release.
+

The fallback is added to allow users to either migrate their images to a compliant registry, or for these registries to become compliant.

Note that this fallback only addresses failures on docker pull. Other commands, such as docker stack deploy, or pulling images with containerd will continue to fail.

Given that other functionality is still broken with these registries, we consider this fallback a temporary solution, and will remove the fallback in an upcoming major release.

Linux containers on Windows (LCOW) (experimental)

Deprecated in Release: v20.10

The experimental feature to run Linux containers on Windows (LCOW) was introduced as a technical preview in Docker 17.09. While many enhancements were made after its introduction, the feature never reached completeness, and development has now stopped in favor of running docker natively on Linux in WSL2.

Developers who want to run Linux workloads on a Windows host are encouraged to use Docker Desktop with WSL2 instead.

BLKIO weight options with cgroups v1

Deprecated in Release: v20.10

Specifying blkio weight (docker run --blkio-weight and docker run --blkio-weight-device) is now marked as deprecated when using cgroups v1 because the corresponding features were removed in Linux kernel v5.0 and up. When using cgroups v2, the --blkio-weight options are implemented using io.weight.

Kernel memory limit

Deprecated in Release: v20.10

Specifying kernel memory limit (docker run --kernel-memory) is now marked as deprecated, as Linux kernel deprecated kmem.limit_in_bytes in v5.4.

Classic Swarm and overlay networks using cluster store

Deprecated in Release: v20.10

Standalone (“classic”) Swarm has been deprecated, and with that the use of overlay networks using an external key/value store. The corresponding --cluster-advertise, --cluster-store, and --cluster-store-opt daemon options have been marked deprecated, and will be disabled or removed in a future release.

Support for legacy ~/.dockercfg configuration files

Deprecated in Release: v20.10

The docker CLI up until v1.7.0 used the ~/.dockercfg file to store credentials after authenticating to a registry (docker login). Docker v1.7.0 replaced this file with a new CLI configuration file, located in ~/.docker/config.json. When implementing the new configuration file, the old file (and file-format) was kept as a fall-back, to assist existing users with migrating to the new file.

Given that the old file format encourages insecure storage of credentials (credentials are stored unencrypted), and that no version of the CLI since Docker v1.7.0 has created this file, the file is marked deprecated, and support for this file will be removed in a future release.

Configuration options for experimental CLI features

The DOCKER_CLI_EXPERIMENTAL environment variable and the corresponding experimental field in the CLI configuration file are deprecated. Experimental features will be enabled by default, and these configuration options will no longer be functional.

CLI plugins support

Deprecated in Release: v20.10

CLI Plugin API is now marked as deprecated.

Dockerfile legacy ENV name value syntax

Deprecated in Release: v20.10

The Dockerfile ENV instruction allows values to be set using either ENV name=value or ENV name value. The latter (ENV name value) form can be ambiguous; for example, the following defines a single env-variable (ONE) with the value "TWO= THREE=world", but may have been intended to set three env-vars:

ENV ONE TWO= THREE=world
+

This format also does not allow setting multiple environment-variables in a single ENV line in the Dockerfile.

Use of the ENV name value syntax is discouraged, and may be removed in a future release. Users are encouraged to update their Dockerfiles to use the ENV name=value syntax, for example:

ENV ONE="" TWO="" THREE="world"
+

+docker build --stream flag (experimental)

Deprecated in Release: v20.10 Removed in Release: v20.10

Docker v17.07 introduced an experimental --stream flag on docker build which allowed the build-context to be incrementally sent to the daemon, instead of unconditionally sending the whole build-context.

This functionality has been reimplemented as part of BuildKit, which uses streaming by default. The --stream option is ignored when using the classic builder, and a deprecation warning is printed instead.

Users that want to use this feature are encouraged to enable BuildKit by setting the DOCKER_BUILDKIT=1 environment variable or through the daemon or CLI configuration files.

+fluentd-async-connect log opt

Deprecated in Release: v20.10

The --log-opt fluentd-async-connect option for the fluentd logging driver is deprecated in favor of --log-opt fluentd-async. A deprecation message is logged in the daemon logs if the old option is used:

fluent#New: AsyncConnect is now deprecated, please use Async instead
+

Users are encouraged to use the fluentd-async option going forward, as support for the old option will be removed in a future release.
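
For example, a container using the fluentd logging driver would opt into the new setting like this (the image and fluentd address are placeholders):

$ docker run --log-driver fluentd --log-opt fluentd-address=localhost:24224 --log-opt fluentd-async=true alpine echo hello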

Pushing and pulling with image manifest v2 schema 1

Deprecated in Release: v19.03

Target For Removal In Release: v20.10

The image manifest v2 schema 1 format is deprecated in favor of the v2 schema 2 format.

If the registry you are using still supports v2 schema 1, urge their administrators to move to v2 schema 2.

+docker engine subcommands

Deprecated in Release: v19.03

Removed in Release: v20.10

The docker engine activate, docker engine check, and docker engine update subcommands provided an alternative installation method to upgrade Docker Community engines to Docker Enterprise, using an image-based distribution of the Docker Engine.

This feature was only available on Linux, and only when executed on a local node. Given the limitations of this feature, and the feature not getting widely adopted, the docker engine subcommands will be removed, in favor of installation through standard package managers.

Top-level docker deploy subcommand (experimental)

Deprecated in Release: v19.03

Removed in Release: v20.10

The top-level docker deploy command (using the “Docker Application Bundle” (.dab) file format) was introduced as an experimental feature in Docker 1.13 / 17.03, but was superseded by support for Docker Compose files using the docker stack deploy subcommand.

+docker stack deploy using “dab” files (experimental)

Deprecated in Release: v19.03

Removed in Release: v20.10

With no development being done on this feature, and no active use of the file format, support for the DAB file format and the top-level docker deploy command (hidden by default in 19.03) will be removed in favor of docker stack deploy using Compose files.

AuFS storage driver

Deprecated in Release: v19.03

The aufs storage driver is deprecated in favor of overlay2, and will be removed in a future release. Users of the aufs storage driver are recommended to migrate to a different storage driver, such as overlay2, which is now the default storage driver.

The aufs storage driver facilitates running Docker on distros that have no support for OverlayFS, such as Ubuntu 14.04 LTS, which originally shipped with a 3.14 kernel.

Now that Ubuntu 14.04 is no longer a supported distro for Docker, and overlay2 is available to all supported distros (as they are either on kernel 4.x, or have support for multiple lowerdirs backported), there is no reason to continue maintenance of the aufs storage driver.

Legacy “overlay” storage driver

Deprecated in Release: v18.09

The overlay storage driver is deprecated in favor of the overlay2 storage driver, which has all the benefits of overlay, without its limitations (excessive inode consumption). The legacy overlay storage driver will be removed in a future release. Users of the overlay storage driver should migrate to the overlay2 storage driver.

The legacy overlay storage driver allowed using overlayFS-backed filesystems on pre 4.x kernels. Now that all supported distributions are able to run overlay2 (as they are either on kernel 4.x, or have support for multiple lowerdirs backported), there is no reason to keep maintaining the overlay storage driver.

Device mapper storage driver

Deprecated in Release: v18.09

The devicemapper storage driver is deprecated in favor of overlay2, and will be removed in a future release. Users of the devicemapper storage driver are recommended to migrate to a different storage driver, such as overlay2, which is now the default storage driver.

The devicemapper storage driver facilitates running Docker on older (3.x) kernels that have no support for other storage drivers (such as overlay2, or AUFS).

Now that support for overlay2 is added to all supported distros (as they are either on kernel 4.x, or have support for multiple lowerdirs backported), there is no reason to continue maintenance of the devicemapper storage driver.

Use of reserved namespaces in engine labels

Deprecated in Release: v18.06

Removed In Release: v20.10

The namespaces com.docker.*, io.docker.*, and org.dockerproject.* in engine labels were always documented to be reserved, but there was never any enforcement.

Usage of these namespaces will now cause a warning in the engine logs to discourage their use, and will error instead in v20.10 and above.

+--disable-legacy-registry override daemon option

Disabled In Release: v17.12

Removed In Release: v19.03

The --disable-legacy-registry flag was disabled in Docker 17.12 and will print an error when used. For this error to be printed, the flag itself is still present, but hidden. The flag has been removed in Docker 19.03.

Interacting with V1 registries

Disabled By Default In Release: v17.06

Removed In Release: v17.12

Version 1.8.3 added a flag (--disable-legacy-registry=false) which prevents the docker daemon from performing pull, push, and login operations against v1 registries. Though enabled by default, this signals the intent to deprecate the v1 protocol.

Support for the v1 protocol to the public registry was removed in 1.13. Any mirror configurations using v1 should be updated to use a v2 registry mirror.

Starting with Docker 17.12, support for V1 registries has been removed, and the --disable-legacy-registry flag can no longer be used; dockerd fails to start if the flag is set.

Asynchronous service create and service update as default

Deprecated In Release: v17.05

Disabled by default in release: v17.10

Docker 17.05 added an optional --detach=false option to make the docker service create and docker service update work synchronously. This option will be enabled by default in Docker 17.10, at which point the --detach flag can be used to restore the previous (asynchronous) behavior.

The default for this option will also be changed accordingly for docker service rollback and docker service scale in Docker 17.10.

-g and --graph flags on dockerd

Deprecated In Release: v17.05

The -g or --graph flag for the dockerd or docker daemon command was used to indicate the directory in which to store persistent data and resource configuration and has been replaced with the more descriptive --data-root flag.

These flags were added before Docker 1.0, so will not be removed, only hidden, to discourage their use.
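
For example, a daemon invocation that previously used -g would now use the replacement flag (the directory path is illustrative):

$ dockerd --data-root /mnt/docker-data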

Top-level network properties in NetworkSettings

Deprecated In Release: v1.13.0

Target For Removal In Release: v17.12

When inspecting a container, NetworkSettings contains top-level information about the default (“bridge”) network:

EndpointID, Gateway, GlobalIPv6Address, GlobalIPv6PrefixLen, IPAddress, IPPrefixLen, IPv6Gateway, and MacAddress.

These properties are deprecated in favor of per-network properties in NetworkSettings.Networks. These properties were already “deprecated” in docker 1.9, but kept around for backward compatibility.

Refer to #17538 for further information.
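
For example, the per-network address of a container attached to the default bridge network can be read from NetworkSettings.Networks instead of the deprecated top-level fields (the container name is illustrative):

$ docker inspect --format '{{.NetworkSettings.Networks.bridge.IPAddress}}' mycontainer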

filter param for /images/json endpoint

Deprecated In Release: v1.13.0

Removed In Release: v20.10

The filter param used to filter the list of images by reference (name or name:tag) is now implemented as a regular filter, named reference.

repository:shortid image references

Deprecated In Release: v1.13.0

Removed In Release: v17.12

The repository:shortid syntax for referencing images is rarely used, collides with tag references, and can be confused with digest references.

Support for the repository:shortid notation to reference images was removed in Docker 17.12.

docker daemon subcommand

Deprecated In Release: v1.13.0

Removed In Release: v17.12

The daemon was moved to a separate binary (dockerd), which should be used instead.

Duplicate keys with conflicting values in engine labels

Deprecated In Release: v1.13.0

Removed In Release: v17.12

When setting duplicate keys with conflicting values, an error will be produced, and the daemon will fail to start.

MAINTAINER in Dockerfile

Deprecated In Release: v1.13.0

MAINTAINER was an early, very limited form of LABEL, which should be used instead.
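
For example, a Dockerfile can carry the same information as a label instead (the value shown is illustrative):

LABEL maintainer="jane.doe@example.com"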

API calls without a version

Deprecated In Release: v1.13.0

Target For Removal In Release: v17.12

API versions should be supplied to all API calls to ensure compatibility with future Engine versions. Instead of just requesting, for example, the URL /containers/json, you must now request /v1.25/containers/json.
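
For example, a versioned call against the daemon's default Unix socket might look like the following sketch (API version and endpoint shown for illustration; requires a curl build with --unix-socket support):

$ curl --unix-socket /var/run/docker.sock http://localhost/v1.25/containers/json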

Backing filesystem without d_type support for overlay/overlay2

Deprecated In Release: v1.13.0

Removed In Release: v17.12

The overlay and overlay2 storage drivers do not work as expected if the backing filesystem does not support d_type. For example, XFS does not support d_type if it is formatted with the ftype=0 option.

Starting with Docker 17.12, new installations will not support running overlay2 on a backing filesystem without d_type support. For existing installations that upgrade to 17.12, a warning will be printed.

Please also refer to #27358 for further information.

--automated and --stars options on docker search

Deprecated In Release: v1.12.0

Removed In Release: v20.10

The docker search --automated and docker search --stars options are deprecated. Use docker search --filter=is-automated=<true|false> and docker search --filter=stars=... instead.
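
For example, the following is roughly equivalent to the deprecated options (the search term and values are illustrative):

$ docker search --filter is-automated=true --filter stars=3 busybox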

-h shorthand for --help

Deprecated In Release: v1.12.0

Target For Removal In Release: v17.09

The shorthand (-h) is less common than --help on Linux and cannot be used on all subcommands (due to it conflicting with, e.g. -h / --hostname on docker create). For this reason, the -h shorthand was not printed in the “usage” output of subcommands, nor documented, and is now marked “deprecated”.

-e and --email flags on docker login

Deprecated In Release: v1.11.0

Removed In Release: v17.06

The docker login command removed the ability to automatically register for an account with the target registry if the given username doesn't exist. Because of this change, the email flag is no longer required and is deprecated.

Separator (:) of --security-opt flag on docker run

Deprecated In Release: v1.11.0

Target For Removal In Release: v17.06

The --security-opt flag no longer uses the colon separator (:) to divide keys and values; it uses the equals sign (=) for consistency with other similar flags, like --storage-opt.
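
For example, the equals-sign form looks like this (the profile value is shown only for illustration):

$ docker run --security-opt seccomp=unconfined busybox true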

Ambiguous event fields in API

Deprecated In Release: v1.10.0

The fields ID, Status, and From in the events API have been deprecated in favor of a richer structure. See the events API documentation for the new format.

-f flag on docker tag

Deprecated In Release: v1.10.0

Removed In Release: v1.12.0

To make tagging consistent across the various docker commands, the -f flag on the docker tag command is deprecated. It is no longer necessary to specify -f to move a tag from one image to another, nor does docker generate an error if the -f flag is missing and the specified tag is already in use.
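
For example, re-pointing an existing tag no longer requires any extra flag (the image names are illustrative):

$ docker tag httpd:2.4 example/web:latest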

HostConfig at API container start

Deprecated In Release: v1.10.0

Removed In Release: v1.12.0

Passing a HostConfig to POST /containers/{name}/start is deprecated in favor of defining it at container creation (POST /containers/create).

--before and --since flags on docker ps

Deprecated In Release: v1.10.0

Removed In Release: v1.12.0

The docker ps --before and docker ps --since options are deprecated. Use docker ps --filter=before=... and docker ps --filter=since=... instead.
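
For example (the container name is illustrative):

$ docker ps --filter before=my_container
$ docker ps --filter since=my_container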

Driver-specific log tags

Deprecated In Release: v1.9.0

Removed In Release: v1.12.0

Log tags are now generated in a standard way across the different logging drivers. As a result, the driver-specific log tag options syslog-tag, gelf-tag, and fluentd-tag have been deprecated in favor of the generic tag option.

+$ docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
+
+

Docker Content Trust ENV passphrase variables name change

Deprecated In Release: v1.9.0

Removed In Release: v1.12.0

Since 1.9, the Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, the corresponding environment variables were changed as well.

/containers/(id or name)/copy endpoint

Deprecated In Release: v1.8.0

Removed In Release: v1.12.0

The endpoint /containers/(id or name)/copy is deprecated in favor of /containers/(id or name)/archive.

LXC built-in exec driver

Deprecated In Release: v1.8.0

Removed In Release: v1.10.0

The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed.

Old Command Line Options

Deprecated In Release: v1.8.0

Removed In Release: v1.10.0

The flags -d and --daemon are deprecated in favor of the daemon subcommand:

docker daemon -H ...
+

The following single-dash (-opt) variants of certain command line options are deprecated and replaced with double-dash options (--opt):

docker attach -nostdin
+docker attach -sig-proxy
+docker build -no-cache
+docker build -rm
+docker commit -author
+docker commit -run
+docker events -since
+docker history -notrunc
+docker images -notrunc
+docker inspect -format
+docker ps -beforeId
+docker ps -notrunc
+docker ps -sinceId
+docker rm -link
+docker run -cidfile
+docker run -dns
+docker run -entrypoint
+docker run -expose
+docker run -link
+docker run -lxc-conf
+docker run -n
+docker run -privileged
+docker run -volumes-from
+docker search -notrunc
+docker search -stars
+docker search -t
+docker search -trusted
+docker tag -force
+

The following double-dash options are deprecated and have no replacement:

docker run --cpuset
+docker run --networking
+docker ps --since-id
+docker ps --before-id
+docker search --trusted
+

Deprecated In Release: v1.5.0

Removed In Release: v1.12.0

The single-dash form (-help) was removed in favor of the double-dash --help:

docker -help
+docker [COMMAND] -help
+

--api-enable-cors flag on dockerd

Deprecated In Release: v1.6.0

Removed In Release: v17.09

The flag --api-enable-cors is deprecated since v1.6.0. Use the flag --api-cors-header instead.

--run flag on docker commit

Deprecated In Release: v0.10.0

Removed In Release: v1.13.0

The --run flag on docker commit (and its short version -run) was deprecated in favor of the --changes flag, which allows passing Dockerfile commands.

Three-argument form of docker import

Deprecated In Release: v0.6.7

Removed In Release: v1.12.0

The docker import command format file|URL|- [REPOSITORY [TAG]] has been deprecated since November 2013. It is no longer supported.

diff --git a/devdocs/docker/engine%2Fextend%2Fconfig%2Findex.html b/devdocs/docker/engine%2Fextend%2Fconfig%2Findex.html new file mode 100644 index 00000000..a154b487 --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Fconfig%2Findex.html @@ -0,0 +1,55 @@ +

Plugin Config Version 1 of Plugin V2

This document outlines the format of the V0 plugin configuration. The plugin config described herein was introduced in the Docker daemon in the v1.12.0 release.

Plugin configs describe the various constituents of a docker plugin. Plugin configs can be serialized to JSON format with the following media types:

Config Type Media Type
config “application/vnd.docker.plugin.v1+json”

Config Field Descriptions

Config provides the base accessible fields for working with V0 plugin format in the registry.

Example Config

Example showing the ‘tiborvass/sample-volume-plugin’ plugin config.

{
+  "Args": {
+    "Description": "",
+    "Name": "",
+    "Settable": null,
+    "Value": null
+  },
+  "Description": "A sample volume plugin for Docker",
+  "Documentation": "https://docs.docker.com/engine/extend/plugins/",
+  "Entrypoint": [
+    "/usr/bin/sample-volume-plugin",
+    "/data"
+  ],
+  "Env": [
+    {
+      "Description": "",
+      "Name": "DEBUG",
+      "Settable": [
+        "value"
+      ],
+      "Value": "0"
+    }
+  ],
+  "Interface": {
+    "Socket": "plugin.sock",
+    "Types": [
+      "docker.volumedriver/1.0"
+    ]
+  },
+  "Linux": {
+    "Capabilities": null,
+    "AllowAllDevices": false,
+    "Devices": null
+  },
+  "Mounts": null,
+  "Network": {
+    "Type": ""
+  },
+  "PropagatedMount": "/data",
+  "User": {},
+  "Workdir": ""
+}
+
+

diff --git a/devdocs/docker/engine%2Fextend%2Findex.html b/devdocs/docker/engine%2Fextend%2Findex.html new file mode 100644 index 00000000..52b27035 --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Findex.html @@ -0,0 +1,89 @@ +

Docker Engine managed plugin system

Docker Engine’s plugin system allows you to install, start, stop, and remove plugins using Docker Engine.

For information about legacy (non-managed) plugins, refer to Understand legacy Docker Engine plugins.

Note

Docker Engine managed plugins are currently not supported on Windows daemons.

Installing and using a plugin

Plugins are distributed as Docker images and can be hosted on Docker Hub or on a private registry.

To install a plugin, use the docker plugin install command, which pulls the plugin from Docker Hub or your private registry, prompts you to grant permissions or capabilities if necessary, and enables the plugin.

To check the status of installed plugins, use the docker plugin ls command. Plugins that start successfully are listed as enabled in the output.

After a plugin is installed, you can use it as an option for another Docker operation, such as creating a volume.

In the following example, you install the sshfs plugin, verify that it is enabled, and use it to create a volume.

Note

This example is intended for instructional purposes only. Once the volume is created, your SSH password to the remote host will be exposed as plaintext when inspecting the volume. You should delete the volume as soon as you are done with the example.

  1. Install the sshfs plugin.

    $ docker plugin install vieux/sshfs
    +
    +Plugin "vieux/sshfs" is requesting the following privileges:
    +- network: [host]
    +- capabilities: [CAP_SYS_ADMIN]
    +Do you grant the above permissions? [y/N] y
    +
    +vieux/sshfs
    +

    The plugin requests 2 privileges:

    • It needs access to the host network.
    • It needs the CAP_SYS_ADMIN capability, which allows the plugin to run the mount command.
  2. Check that the plugin is enabled in the output of docker plugin ls.

    $ docker plugin ls
    +
    +ID                    NAME                  TAG                 DESCRIPTION                   ENABLED
    +69553ca1d789          vieux/sshfs           latest              the `sshfs` plugin            true
    +
  3. Create a volume using the plugin. This example mounts the /remote directory on host 1.2.3.4 into a volume named sshvolume.

    This volume can now be mounted into containers.

    $ docker volume create \
    +  -d vieux/sshfs \
    +  --name sshvolume \
    +  -o sshcmd=user@1.2.3.4:/remote \
    +  -o password=$(cat file_containing_password_for_remote_host)
    +
    +sshvolume
    +
  4. Verify that the volume was created successfully.

    $ docker volume ls
    +
    +DRIVER              NAME
    +vieux/sshfs         sshvolume
    +
  5. Start a container that uses the volume sshvolume.

    $ docker run --rm -v sshvolume:/data busybox ls /data
    +
    +<content of /remote on machine 1.2.3.4>
    +
  6. Remove the volume sshvolume

    $ docker volume rm sshvolume
    +
    +sshvolume
    +

To disable a plugin, use the docker plugin disable command. To completely remove it, use the docker plugin remove command. For other available commands and options, see the command line reference.
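
For example, to disable and then remove the plugin installed above:

$ docker plugin disable vieux/sshfs
$ docker plugin rm vieux/sshfs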

Developing a plugin

The rootfs directory

The rootfs directory represents the root filesystem of the plugin. In this example, it was created from a Dockerfile:

Note: The /run/docker/plugins directory is mandatory inside of the plugin’s filesystem for docker to communicate with the plugin.

$ git clone https://github.com/vieux/docker-volume-sshfs
+$ cd docker-volume-sshfs
+$ docker build -t rootfsimage .
+$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created
+$ sudo mkdir -p myplugin/rootfs
+$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs
+$ docker rm -vf "$id"
+$ docker rmi rootfsimage
+

The config.json file

The config.json file describes the plugin. See the plugins config reference.

Consider the following config.json file.

{
+	"description": "sshFS plugin for Docker",
+	"documentation": "https://docs.docker.com/engine/extend/plugins/",
+	"entrypoint": ["/docker-volume-sshfs"],
+	"network": {
+		   "type": "host"
+		   },
+	"interface" : {
+		   "types": ["docker.volumedriver/1.0"],
+		   "socket": "sshfs.sock"
+	},
+	"linux": {
+		"capabilities": ["CAP_SYS_ADMIN"]
+	}
+}
+

This plugin is a volume driver. It requires a host network and the CAP_SYS_ADMIN capability. It depends upon the /docker-volume-sshfs entrypoint and uses the /run/docker/plugins/sshfs.sock socket to communicate with Docker Engine. This plugin has no runtime parameters.

Creating the plugin

A new plugin can be created by running docker plugin create <plugin-name> ./path/to/plugin/data where the plugin data contains a plugin configuration file config.json and a root filesystem in subdirectory rootfs.

After that the plugin <plugin-name> will show up in docker plugin ls. Plugins can be pushed to remote registries with docker plugin push <plugin-name>.
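
As a minimal sketch, assuming config.json has been copied next to the rootfs directory created above (the repository name is illustrative):

$ cp config.json ./myplugin/
$ docker plugin create example/sshfs-plugin ./myplugin
$ docker plugin push example/sshfs-plugin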

Debugging plugins

Stdout of a plugin is redirected to dockerd logs. Such entries have a plugin=<ID> suffix. Here are a few examples of commands for pluginID f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 and their corresponding log entries in the docker daemon logs.

$ docker plugin install tiborvass/sample-volume-plugin
+
+INFO[0036] Starting...       Found 0 volumes on startup  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+
$ docker volume create -d tiborvass/sample-volume-plugin samplevol
+
+INFO[0193] Create Called...  Ensuring directory /data/samplevol exists on host...  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0193] open /var/lib/docker/plugin-data/local-persist.json: no such file or directory  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0193]                   Created volume samplevol with mountpoint /data/samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0193] Path Called...    Returned path /data/samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+
$ docker run -v samplevol:/tmp busybox sh
+
+INFO[0421] Get Called...     Found samplevol                plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0421] Mount Called...   Mounted samplevol              plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0421] Path Called...    Returned path /data/samplevol  plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+INFO[0421] Unmount Called... Unmounted samplevol            plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
+

Using docker-runc to obtain logfiles and shell into the plugin.

docker-runc, the default docker container runtime, can be used for debugging plugins. It is specifically useful for collecting plugin logs if they are redirected to a file.

$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins list
+
+ID                                                                 PID         STATUS      BUNDLE                                                                                                                                       CREATED                          OWNER
+93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25   15806       running     /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25   2018-02-08T21:40:08.621358213Z   root
+9b4606d84e06b56df84fadf054a21374b247941c94ce405b0a261499d689d9c9   14992       running     /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/9b4606d84e06b56df84fadf054a21374b247941c94ce405b0a261499d689d9c9   2018-02-08T21:35:12.321325872Z   root
+c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d   14984       running     /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d   2018-02-08T21:35:12.321288966Z   root
+
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 cat /var/log/plugin.log
+

If the plugin has a built-in shell, you can exec into the plugin as follows:

$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec -t 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 sh
+

Using curl to debug plugin socket issues.

To verify whether the plugin API socket that the docker daemon communicates with is responsive, use curl. In this example, we make API calls from the docker host to volume and network plugins using curl 7.47.0 to ensure that the plugin is listening on that socket. For a well-functioning plugin, these basic requests should work. Note that plugin sockets are available on the host under /var/run/docker/plugins/<pluginID>.

$ curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
+
+{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null}
+
$ curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
+
+{"Scope":"local"}
+

When using curl 7.5 and above, the URL should be of the form http://hostname/APICall, where hostname is the valid hostname where the plugin is installed and APICall is the call to the plugin API.

For example, http://localhost/VolumeDriver.List

diff --git a/devdocs/docker/engine%2Fextend%2Flegacy_plugins%2Findex.html b/devdocs/docker/engine%2Fextend%2Flegacy_plugins%2Findex.html new file mode 100644 index 00000000..830393d2 --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Flegacy_plugins%2Findex.html @@ -0,0 +1,9 @@ +

Use Docker Engine plugins

This document describes the Docker Engine plugins generally available in Docker Engine. To view information on plugins managed by Docker, refer to Docker Engine plugin system.

You can extend the capabilities of the Docker Engine by loading third-party plugins. This page explains the types of plugins and provides links to several volume and network plugins for Docker.

Types of plugins

Plugins extend Docker’s functionality. They come in specific types. For example, a volume plugin might enable Docker volumes to persist across multiple Docker hosts and a network plugin might provide network plumbing.

Currently Docker supports authorization, volume and network driver plugins. In the future it will support additional plugin types.

Installing a plugin

Follow the instructions in the plugin’s documentation.

Finding a plugin

The sections below provide a non-exhaustive overview of available plugins.

Network plugins

Plugin Description
Contiv Networking An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards.
Kuryr Network Plugin A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well.
Weave Network Plugin A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity.

Volume plugins

Plugin Description
Azure File Storage plugin Lets you mount Microsoft Azure File Storage shares to Docker containers as volumes using the SMB 3.0 protocol. Learn more.
BeeGFS Volume Plugin An open source volume plugin to create persistent volumes in a BeeGFS parallel file system.
Blockbridge plugin A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS.
Contiv Volume Plugin An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS.
Convoy plugin A volume plugin for a variety of storage back-ends including device mapper and NFS. It’s a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore.
DigitalOcean Block Storage plugin Integrates DigitalOcean’s block storage solution into the Docker ecosystem by automatically attaching a given block storage volume to a DigitalOcean droplet and making the contents of the volume available to Docker containers running on that droplet.
DRBD plugin A volume plugin that provides highly available storage replicated by DRBD. Data written to the docker volume is replicated in a cluster of DRBD nodes.
Flocker plugin A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines.
Fuxi Volume Plugin A volume plugin that is developed as part of the OpenStack Kuryr project and implements the Docker volume plugin API by utilizing Cinder, the OpenStack block storage service.
gce-docker plugin A volume plugin able to attach, format and mount Google Compute persistent-disks.
GlusterFS plugin A volume plugin that provides multi-host volumes management for Docker using GlusterFS.
Horcrux Volume Plugin A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, Minio and Amazon S3.
HPE 3Par Volume Plugin A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays.
Infinit volume plugin A volume plugin that makes it easy to mount and manage Infinit volumes using Docker.
IPFS Volume Plugin An open source volume plugin that allows using an ipfs filesystem as a volume.
Keywhiz plugin A plugin that provides credentials and secret management using Keywhiz as a central repository.
Local Persist Plugin A volume plugin that extends the default local driver’s functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to always persist, even if the volume is removed via docker volume rm.
+NetApp Plugin (nDVP) A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future.
Netshare plugin A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems.
Nimble Storage Volume Plugin A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones.
OpenStorage Plugin A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few.
Portworx Volume Plugin A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler.
Quobyte Volume Plugin A volume plugin that connects Docker to Quobyte’s data center file system, a general-purpose scalable and fault-tolerant storage platform.
REX-Ray plugin A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC.
Virtuozzo Storage and Ploop plugin A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices.
VMware vSphere Storage Plugin Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments.

Authorization plugins

Plugin Description
Casbin AuthZ Plugin An authorization plugin based on Casbin, which supports access control models like ACL, RBAC, ABAC. The access control model can be customized. The policy can be persisted into file or DB.
HBM plugin An authorization plugin that prevents commands from being executed with certain parameters.
Twistlock AuthZ Broker A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name).

Troubleshooting a plugin

If you are having problems with Docker after loading a plugin, ask the authors of the plugin for help. The Docker team may not be able to assist you.

Writing a plugin

If you are interested in writing a plugin for Docker, or seeing how they work under the hood, see the docker plugins reference.

diff --git a/devdocs/docker/engine%2Fextend%2Fplugin_api%2Findex.html b/devdocs/docker/engine%2Fextend%2Fplugin_api%2Findex.html new file mode 100644 index 00000000..707ab33a --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Fplugin_api%2Findex.html @@ -0,0 +1,43 @@ +

Docker Plugin API

Docker plugins are out-of-process extensions which add capabilities to the Docker Engine.

This document describes the Docker Engine plugin API. To view information on plugins managed by Docker Engine, refer to Docker Engine plugin system.

This page is intended for people who want to develop their own Docker plugin. If you just want to learn about or use Docker plugins, look here.

What plugins are

A plugin is a process running on the same or a different host as the docker daemon, which registers itself by placing a file on the same docker host in one of the plugin directories described in Plugin discovery.

Plugins have human-readable names, which are short, lowercase strings. For example, flocker or weave.

Plugins can run inside or outside containers. Currently running them outside containers is recommended.

Plugin discovery

Docker discovers plugins by looking for them in the plugin directory whenever a user or container tries to use one by name.

There are three types of files which can be put in the plugin directory.

Plugins with UNIX domain socket files must run on the same docker host, whereas plugins with spec or json files can run on a different host if a remote URL is specified.

UNIX domain socket files must be located under /run/docker/plugins, whereas spec files can be located either under /etc/docker/plugins or /usr/lib/docker/plugins.

The name of the file (excluding the extension) determines the plugin name.

For example, the flocker plugin might create a UNIX socket at /run/docker/plugins/flocker.sock.

You can define each plugin in a separate subdirectory if you want to isolate definitions from each other. For example, you can create the flocker socket under /run/docker/plugins/flocker/flocker.sock and only mount /run/docker/plugins/flocker inside the flocker container.

Docker always searches for unix sockets in /run/docker/plugins first. It checks for spec or json files under /etc/docker/plugins and /usr/lib/docker/plugins if the socket doesn’t exist. The directory scan stops as soon as it finds the first plugin definition with the given name.
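
As a minimal sketch, a spec file is a text file containing the plugin's address; for example, creating one for a hypothetical plugin reachable over TCP:

$ sudo mkdir -p /etc/docker/plugins
$ echo "tcp://localhost:8080" | sudo tee /etc/docker/plugins/my-plugin.spec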

JSON specification

This is the JSON format for a plugin:

{
+  "Name": "plugin-example",
+  "Addr": "https://example.com/docker/plugin",
+  "TLSConfig": {
+    "InsecureSkipVerify": false,
+    "CAFile": "/usr/shared/docker/certs/example-ca.pem",
+    "CertFile": "/usr/shared/docker/certs/example-cert.pem",
+    "KeyFile": "/usr/shared/docker/certs/example-key.pem"
+  }
+}
+

The TLSConfig field is optional and TLS will only be verified if this configuration is present.

Plugin lifecycle

Plugins should be started before Docker, and stopped after Docker. For example, when packaging a plugin for a platform which supports systemd, you might use systemd dependencies to manage startup and shutdown order.

When upgrading a plugin, you should first stop the Docker daemon, upgrade the plugin, then start Docker again.

Plugin activation

When a plugin is first referred to -- either by a user referring to it by name (e.g. docker run --volume-driver=foo) or a container already configured to use a plugin being started -- Docker looks for the named plugin in the plugin directory and activates it with a handshake. See Handshake API below.

Plugins are not activated automatically at Docker daemon startup. Rather, they are activated only lazily, or on-demand, when they are needed.

Systemd socket activation

Plugins may also be socket activated by systemd. The official plugins helpers natively support socket activation. In order for a plugin to be socket activated, it needs a service file and a socket file.

The service file (for example /lib/systemd/system/your-plugin.service):

[Unit]
+Description=Your plugin
+Before=docker.service
+After=network.target your-plugin.socket
+Requires=your-plugin.socket docker.service
+
+[Service]
+ExecStart=/usr/lib/docker/your-plugin
+
+[Install]
+WantedBy=multi-user.target
+

The socket file (for example /lib/systemd/system/your-plugin.socket):

[Unit]
+Description=Your plugin
+
+[Socket]
+ListenStream=/run/docker/plugins/your-plugin.sock
+
+[Install]
+WantedBy=sockets.target
+

This allows plugins to actually be started when the Docker daemon connects to the sockets they are listening on (for instance, the first time the daemon uses them, or if one of the plugins goes down accidentally).

API design

The Plugin API is RPC-style JSON over HTTP, much like webhooks.

Requests flow from the Docker daemon to the plugin. So the plugin needs to implement an HTTP server and bind this to the UNIX socket mentioned in the “plugin discovery” section.

All requests are HTTP POST requests.

The API is versioned via an Accept header, which currently is always set to application/vnd.docker.plugins.v1+json.
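
For example, a handshake request crafted by hand might look like the following sketch (the socket path is illustrative; the daemon normally makes this call itself):

$ curl -XPOST --unix-socket /run/docker/plugins/myplugin.sock \
    -H "Accept: application/vnd.docker.plugins.v1+json" \
    http://localhost/Plugin.Activate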

Handshake API

Plugins are activated via the following “handshake” API call.

/Plugin.Activate

Request: empty body

Response:

{
+    "Implements": ["VolumeDriver"]
+}
+

Responds with a list of Docker subsystems which this plugin implements. After activation, the plugin will then be sent events from this subsystem.

Possible values are:

• authz
• NetworkDriver
• VolumeDriver

Plugin retries

Attempts to call a method on a plugin are retried with an exponential backoff for up to 30 seconds. This may help when packaging plugins as containers, since it gives plugin containers a chance to start up before failing any user containers which depend on them.

Plugins helpers

To ease plugin development, we provide an SDK for each kind of plugin currently supported by Docker at docker/go-plugins-helpers.

diff --git a/devdocs/docker/engine%2Fextend%2Fplugins_authorization%2Findex.html b/devdocs/docker/engine%2Fextend%2Fplugins_authorization%2Findex.html new file mode 100644 index 00000000..89f8c6f3 --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Fplugins_authorization%2Findex.html @@ -0,0 +1,48 @@ +

Access authorization plugin

This document describes the Docker Engine plugins generally available in Docker Engine. To view information on plugins managed by Docker Engine, refer to Docker Engine plugin system.

Docker’s out-of-the-box authorization model is all or nothing. Any user with permission to access the Docker daemon can run any Docker client command. The same is true for callers using Docker’s Engine API to contact the daemon. If you require greater access control, you can create authorization plugins and add them to your Docker daemon configuration. Using an authorization plugin, a Docker administrator can configure granular access policies for managing access to the Docker daemon.

Anyone with the appropriate skills can develop an authorization plugin. These skills, at their most basic, are knowledge of Docker, understanding of REST, and sound programming knowledge. This document describes the architecture, state, and methods information available to an authorization plugin developer.

Basic principles

Docker’s plugin infrastructure enables extending Docker by loading, removing and communicating with third-party components using a generic API. The access authorization subsystem was built using this mechanism.

Using this subsystem, you don’t need to rebuild the Docker daemon to add an authorization plugin. You can add a plugin to an installed Docker daemon. You do need to restart the Docker daemon to add a new plugin.

An authorization plugin approves or denies requests to the Docker daemon based on both the current authentication context and the command context. The authentication context contains all user details and the authentication method. The command context contains all the relevant request data.

Authorization plugins must follow the rules described in Docker Plugin API. Each plugin must reside within directories described under the Plugin discovery section.

Note

The abbreviations AuthZ and AuthN mean authorization and authentication respectively.

Default user authorization mechanism

If TLS is enabled in the Docker daemon, the default user authorization flow extracts the user details from the certificate subject name. That is, the User field is set to the client certificate subject common name, and the AuthenticationMethod field is set to TLS.

Basic architecture

You are responsible for registering your plugin as part of the Docker daemon startup. You can install multiple plugins and chain them together. This chain can be ordered. Each request to the daemon passes in order through the chain. Access is granted only when all the plugins grant access to the resource.

When an HTTP request is made to the Docker daemon through the CLI or via the Engine API, the authentication subsystem passes the request to the installed authentication plugin(s). The request contains the user (caller) and command context. The plugin is responsible for deciding whether to allow or deny the request.

The sequence diagrams below depict an allow and deny authorization flow:

Authorization Allow flow

Authorization Deny flow

Each request sent to the plugin includes the authenticated user, the HTTP headers, and the request/response body. Only the user name and the authentication method used are passed to the plugin. Most importantly, no user credentials or tokens are passed. Finally, not all request/response bodies are sent to the authorization plugin. Only those request/response bodies where the Content-Type is either text/* or application/json are sent.

For commands that can potentially hijack the HTTP connection (HTTP Upgrade), such as exec, the authorization plugin is only called for the initial HTTP requests. Once the plugin approves the command, authorization is not applied to the rest of the flow. Specifically, the streaming data is not passed to the authorization plugins. For commands that return chunked HTTP response, such as logs and events, only the HTTP request is sent to the authorization plugins.

During request/response processing, some authorization flows might need to do additional queries to the Docker daemon. To complete such flows, plugins can call the daemon API similar to a regular user. To enable these additional queries, the plugin must provide the means for an administrator to configure proper authentication and security policies.

Docker client flows

To enable and configure the authorization plugin, the plugin developer must support the Docker client interactions detailed in this section.

Setting up Docker daemon

Enable the authorization plugin with a dedicated command line flag in the --authorization-plugin=PLUGIN_ID format. The flag supplies a PLUGIN_ID value. This value can be the plugin’s socket or a path to a specification file. Authorization plugins can be loaded without restarting the daemon. Refer to the dockerd documentation for more information.

$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
+

Docker’s authorization subsystem supports multiple --authorization-plugin parameters.
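
Alternatively, plugins can be configured in the daemon configuration file; a minimal sketch, assuming the default /etc/docker/daemon.json location and illustrative plugin names:

{
  "authorization-plugins": ["plugin1", "plugin2"]
}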

Calling authorized command (allow)

$ docker pull centos
+<...>
+f1b10cd84249: Pull complete
+<...>
+

Calling unauthorized command (deny)

$ docker pull centos
+<...>
+docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed.
+

Error from plugins

$ docker pull centos
+<...>
+docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?.
+

API schema and implementation

In addition to Docker’s standard plugin registration method, each plugin should implement the following two methods:

/AuthZPlugin.AuthZReq

Request:

{
+    "User":              "The user identification",
+    "UserAuthNMethod":   "The authentication method used",
+    "RequestMethod":     "The HTTP method",
+    "RequestURI":        "The HTTP request URI",
+    "RequestBody":       "Byte array containing the raw HTTP request body",
+    "RequestHeader":     "Byte array containing the raw HTTP request header as a map[string][]string "
+}
+

Response:

{
+    "Allow": "Determined whether the user is allowed or not",
+    "Msg":   "The authorization message",
+    "Err":   "The error message if things go wrong"
+}
+

/AuthZPlugin.AuthZRes

Request:

{
+    "User":              "The user identification",
+    "UserAuthNMethod":   "The authentication method used",
+    "RequestMethod":     "The HTTP method",
+    "RequestURI":        "The HTTP request URI",
+    "RequestBody":       "Byte array containing the raw HTTP request body",
+    "RequestHeader":     "Byte array containing the raw HTTP request header as a map[string][]string",
+    "ResponseBody":      "Byte array containing the raw HTTP response body",
+    "ResponseHeader":    "Byte array containing the raw HTTP response header as a map[string][]string",
+    "ResponseStatusCode":"Response status code"
+}
+

Response:

{
+   "Allow":              "Determined whether the user is allowed or not",
+   "Msg":                "The authorization message",
+   "Err":                "The error message if things go wrong"
+}
+

Request authorization

Each plugin must support two request authorization message formats, one from the daemon to the plugin and one from the plugin to the daemon. The tables below detail the content expected in each message.

Daemon -> Plugin

Name Type Description
User string The user identification
Authentication method string The authentication method used
Request method enum The HTTP method (GET/DELETE/POST)
Request URI string The HTTP request URI including API version (e.g., v.1.17/containers/json)
Request headers map[string]string Request headers as key value pairs (without the authorization header)
Request body []byte Raw request body

Plugin -> Daemon

Name Type Description
Allow bool Boolean value indicating whether the request is allowed or denied
Msg string Authorization message (will be returned to the client in case the access is denied)
Err string Error message (will be returned to the client in case the plugin encounters an error. The string value supplied may appear in logs, so it should not include confidential information)

Response authorization

The plugin must support two authorization message formats, one from the daemon to the plugin and one from the plugin to the daemon. The tables below detail the content expected in each message.

Daemon -> Plugin

Name Type Description
User string The user identification
Authentication method string The authentication method used
Request method string The HTTP method (GET/DELETE/POST)
Request URI string The HTTP request URI including API version (e.g., v.1.17/containers/json)
Request headers map[string]string Request headers as key value pairs (without the authorization header)
Request body []byte Raw request body
Response status code int Status code from the docker daemon
Response headers map[string]string Response headers as key value pairs
Response body []byte Raw docker daemon response body

Plugin -> Daemon

Name Type Description
Allow bool Boolean value indicating whether the response is allowed or denied
Msg string Authorization message (will be returned to the client in case the access is denied)
Err string Error message (will be returned to the client in case the plugin encounters an error. The string value supplied may appear in logs, so it should not include confidential information)
diff --git a/devdocs/docker/engine%2Fextend%2Fplugins_network%2Findex.html b/devdocs/docker/engine%2Fextend%2Fplugins_network%2Findex.html new file mode 100644 index 00000000..904b68cc --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Fplugins_network%2Findex.html @@ -0,0 +1,10 @@ +

Docker network driver plugins

This document describes Docker Engine network driver plugins generally available in Docker Engine. To view information on plugins managed by Docker Engine, refer to Docker Engine plugin system.

Docker Engine network plugins enable Engine deployments to be extended to support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN or something completely different. Network driver plugins are supported via the LibNetwork project. Each plugin is implemented as a “remote driver” for LibNetwork, which shares plugin infrastructure with Engine. Effectively, network driver plugins are activated in the same way as other plugins, and use the same kind of protocol.

Network plugins and swarm mode

Legacy plugins do not work in swarm mode. However, plugins written using the v2 plugin system do work in swarm mode, as long as they are installed on each swarm worker node.

Use network driver plugins

The means of installing and running a network driver plugin depend on the particular plugin. So, be sure to install your plugin according to the instructions obtained from the plugin developer.

Once running, however, network driver plugins are used just like the built-in network drivers: by being mentioned as a driver in network-oriented Docker commands. For example,

$ docker network create --driver weave mynet
+

Some network driver plugins are listed in plugins

The mynet network is now owned by weave, so subsequent commands referring to that network will be sent to the plugin:

$ docker run --network=mynet busybox top
+

Find network plugins

Network plugins are written by third parties, and are published by those third parties, either on Docker Store or on the third party’s site.

Write a network plugin

Network plugins implement the Docker plugin API and the network plugin protocol.

Network plugin protocol

The network driver protocol, in addition to the plugin activation call, is documented as part of libnetwork: https://github.com/docker/libnetwork/blob/master/docs/remote.md.

To interact with the Docker maintainers and other interested users, see the IRC channel #docker-network.

diff --git a/devdocs/docker/engine%2Fextend%2Fplugins_volume%2Findex.html b/devdocs/docker/engine%2Fextend%2Fplugins_volume%2Findex.html new file mode 100644 index 00000000..14319e18 --- /dev/null +++ b/devdocs/docker/engine%2Fextend%2Fplugins_volume%2Findex.html @@ -0,0 +1,98 @@ +

Docker volume plugins

Docker Engine volume plugins enable Engine deployments to be integrated with external storage systems such as Amazon EBS, and enable data volumes to persist beyond the lifetime of a single Docker host. See the plugin documentation for more information.

Changelog

1.13.0

1.12.0

1.10.0

1.8.0

Command-line changes

To give a container access to a volume, use the --volume and --volume-driver flags on the docker container run command. The --volume (or -v) flag accepts a volume name and path on the host, and the --volume-driver flag accepts a driver type.

$ docker volume create --driver=flocker volumename
+
+$ docker container run -it --volume volumename:/data busybox sh
+

--volume

The --volume (or -v) flag takes a value that is in the format <volume_name>:<mountpoint>. The two parts of the value are separated by a colon (:) character.

volumedriver

Specifying a volumedriver in conjunction with a volumename allows you to use plugins such as Flocker to manage volumes external to a single host, such as those on EBS.

Create a VolumeDriver

The container creation endpoint (/containers/create) accepts a VolumeDriver field of type string, allowing you to specify the name of the driver. If not specified, it defaults to "local" (the default driver for local volumes).

Volume plugin protocol

If a plugin registers itself as a VolumeDriver when activated, it must provide the Docker Daemon with writeable paths on the host filesystem. The Docker daemon provides these paths to containers to consume. The Docker daemon makes the volumes available by bind-mounting the provided paths into the containers.

Note

Volume plugins should not write data to the /var/lib/docker/ directory, including /var/lib/docker/volumes. The /var/lib/docker/ directory is reserved for Docker.

/VolumeDriver.Create

Request:

{
+    "Name": "volume_name",
+    "Opts": {}
+}
+

Instruct the plugin that the user wants to create a volume, given a user specified volume name. The plugin does not need to actually manifest the volume on the filesystem yet (until Mount is called). Opts is a map of driver specific options passed through from the user request.

Response:

{
+    "Err": ""
+}
+

Respond with a string error if an error occurred.

/VolumeDriver.Remove

Request:

{
+    "Name": "volume_name"
+}
+

Delete the specified volume from disk. This request is issued when a user invokes docker rm -v to remove volumes associated with a container.

Response:

{
+    "Err": ""
+}
+

Respond with a string error if an error occurred.

/VolumeDriver.Mount

Request:

{
+    "Name": "volume_name",
+    "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
+}
+

Docker requires the plugin to provide a volume, given a user specified volume name. Mount is called once per container start. If the same volume_name is requested more than once, the plugin may need to keep track of each new mount request and provision at the first mount request and deprovision at the last corresponding unmount request.

ID is a unique ID for the caller that is requesting the mount.

Response:

Mountpoint is the path on the host (v1) or in the plugin (v2) where the volume has been made available.

Err is either empty or contains an error string.
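
A successful response therefore has roughly the following shape (the path is illustrative):

{
    "Mountpoint": "/path/under/the/plugin/data",
    "Err": ""
}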

/VolumeDriver.Path

Request:

{
+    "Name": "volume_name"
+}
+

Request the path to the volume with the given volume_name.

Response:

Respond with the path on the host (v1) or inside the plugin (v2) where the volume has been made available, and/or a string error if an error occurred.

Mountpoint is optional. However, the plugin may be queried again later if one is not provided.
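
For example, a response might look like this sketch (the path is illustrative):

{
    "Mountpoint": "/path/under/the/plugin/data",
    "Err": ""
}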

/VolumeDriver.Unmount

Request:

{
+    "Name": "volume_name",
+    "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c"
+}
+

Docker is no longer using the named volume. Unmount is called once per container stop. The plugin may deduce that it is safe to deprovision the volume at this point.

ID is a unique ID for the caller that is requesting the mount.

Response:

{
+    "Err": ""
+}
+

Respond with a string error if an error occurred.

/VolumeDriver.Get

Request:

{
+    "Name": "volume_name"
+}
+

Get info about volume_name.

Response:

Respond with a string error if an error occurred. Mountpoint and Status are optional.
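
A sketch of a typical response, assuming the volume exists (values are illustrative):

{
    "Volume": {
        "Name": "volume_name",
        "Mountpoint": "/path/under/the/plugin/data",
        "Status": {}
    },
    "Err": ""
}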

/VolumeDriver.List

Request:

{}
+

Get the list of volumes registered with the plugin.

Response:

Respond with a string error if an error occurred. Mountpoint is optional.
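
A sketch of a typical response (values are illustrative):

{
    "Volumes": [
        {
            "Name": "volume_name",
            "Mountpoint": "/path/under/the/plugin/data"
        }
    ],
    "Err": ""
}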

/VolumeDriver.Capabilities

Request:

{}
+

Get the list of capabilities the driver supports.

The driver is not required to implement Capabilities. If it is not implemented, the default values are used.

Response:

{
+  "Capabilities": {
+    "Scope": "global"
+  }
+}
+

Supported scopes are global and local. Any other value in Scope is ignored, and local is used. Scope allows cluster managers to handle the volume in different ways. For instance, a scope of global signals to the cluster manager that it only needs to create the volume once instead of on each Docker host. More capabilities may be added in the future.

diff --git a/devdocs/docker/engine%2Findex.html b/devdocs/docker/engine%2Findex.html new file mode 100644 index 00000000..f5e4a079 --- /dev/null +++ b/devdocs/docker/engine%2Findex.html @@ -0,0 +1,9 @@ +

Docker Engine overview


Docker Engine is an open source containerization technology for building and containerizing your applications. Docker Engine acts as a client-server application with:

• A server with a long-running daemon process (dockerd).
• APIs that specify interfaces programs can use to talk to and instruct the Docker daemon.
• A command line interface (CLI) client (docker).

The CLI uses Docker APIs to control or interact with the Docker daemon through scripting or direct CLI commands. Many other Docker applications use the underlying API and CLI. The daemon creates and manages Docker objects, such as images, containers, networks, and volumes.

For more details, see Docker Architecture.

Docker user guide

To learn about Docker in more detail and to answer questions about usage and implementation, check out the overview page in “get started”.

Installation guides

The installation section shows you how to install Docker on a variety of platforms.

Release notes

A summary of the changes in each release in the current series can now be found on the separate Release Notes page.

Feature Deprecation Policy

As changes are made to Docker there may be times when existing features need to be removed or replaced with newer features. Before an existing feature is removed it is labeled as “deprecated” within the documentation and remains in Docker for at least 3 stable releases unless specified explicitly otherwise. After that time it may be removed.

Users are expected to take note of the list of deprecated features each release and plan their migration away from those features, and (if applicable) towards the replacement features as soon as possible.

The complete list of deprecated features can be found on the Deprecated Features page.

Licensing

Docker is licensed under the Apache License, Version 2.0. See LICENSE for the full license text.

diff --git a/devdocs/docker/engine%2Finstall%2Fbinaries%2Findex.html b/devdocs/docker/engine%2Finstall%2Fbinaries%2Findex.html new file mode 100644 index 00000000..27299a50 --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Fbinaries%2Findex.html @@ -0,0 +1,26 @@ +

Install Docker Engine from binaries

+ +

Important

This page contains information on how to install Docker using binaries. These instructions are mostly suitable for testing purposes. We do not recommend installing Docker using binaries in production environments as they will not be updated automatically with security updates. The Linux binaries described on this page are statically linked, which means that vulnerabilities in build-time dependencies are not automatically patched by security updates of your Linux distribution.

Updating binaries is also slightly more involved when compared to Docker packages installed using a package manager or through Docker Desktop, as it requires (manually) updating the installed version whenever there is a new release of Docker.

Also, static binaries may not include all functionality provided by the dynamic packages.

On Windows and Mac, we recommend that you install Docker Desktop instead. For Linux, we recommend that you follow the instructions specific for your distribution.

If you want to try Docker or use it in a testing environment, but you’re not on a supported platform, you can try installing from static binaries. If possible, you should use packages built for your operating system, and use your operating system’s package management system to manage Docker installation and upgrades.

Static binaries for the Docker daemon binary are only available for Linux (as dockerd) and Windows (as dockerd.exe). Static binaries for the Docker client are available for Linux, Windows, and macOS (as docker).

This topic discusses binary installation for Linux, Windows, and macOS.

Install daemon and client binaries on Linux

Prerequisites

Before attempting to install Docker from binaries, be sure your host machine meets the prerequisites:

Secure your environment as much as possible

OS considerations

Enable SELinux or AppArmor if possible.

It is recommended to use AppArmor or SELinux if your Linux distribution supports either of the two. This helps improve security and blocks certain types of exploits. Review the documentation for your Linux distribution for instructions for enabling and configuring AppArmor or SELinux.

Security Warning

If either of the security mechanisms is enabled, do not disable it as a work-around to make Docker or its containers run. Instead, configure it correctly to fix any problems.

Docker daemon considerations

Install static binaries

  1. Download the static binary archive. Go to https://download.docker.com/linux/static/stable/, choose your hardware platform, and download the .tgz file relating to the version of Docker Engine you want to install.

  2. Extract the archive using the tar utility. The dockerd and docker binaries are extracted.

    $ tar xzvf /path/to/<FILE>.tar.gz
    +
  3. Optional: Move the binaries to a directory on your executable path, such as /usr/bin/. If you skip this step, you must provide the path to the executable when you invoke docker or dockerd commands.

    $ sudo cp docker/* /usr/bin/
    +
  4. Start the Docker daemon:

    $ sudo dockerd &
    +

    If you need to start the daemon with additional options, modify the above command accordingly, or create and edit the file /etc/docker/daemon.json to add the custom configuration options (a small example follows these steps).

  5. Verify that Docker is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Install client binaries on macOS

Note

The following instructions are mostly suitable for testing purposes. The macOS binary includes the Docker client only. It does not include the dockerd daemon which is required to run containers. Therefore, we recommend that you install Docker Desktop instead.

The binaries for Mac also do not contain several components that are bundled with Docker Desktop.

To install client binaries, perform the following steps:

  1. Download the static binary archive. Go to https://download.docker.com/mac/static/stable/ and select x86_64 (for Mac on Intel chip) or aarch64 (for Mac on Apple silicon), and then download the .tgz file relating to the version of Docker Engine you want to install.

  2. Extract the archive using the tar utility. The docker binary is extracted.

    $ tar xzvf /path/to/<FILE>.tar.gz
    +
  3. Clear the extended attributes to allow it to run.

    $ sudo xattr -rc docker
    +

    Now, when you run the following command, you can see the Docker CLI usage instructions:

    $ docker/docker
    +
  4. Optional: Move the binary to a directory on your executable path, such as /usr/local/bin/. If you skip this step, you must provide the path to the executable when you invoke docker or dockerd commands.

    $ sudo cp docker/docker /usr/local/bin/
    +
  5. Verify that Docker is installed correctly by running the hello-world image. The value of <hostname> is a hostname or IP address running the Docker daemon and accessible to the client.

    $ sudo docker -H <hostname> run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Install server and client binaries on Windows

Note

The following section describes how to install the Docker daemon on Windows Server which allows you to run Windows containers only. The binaries for Windows do not contain Docker components such as buildx, docker scan, and docker compose. If you are running Windows 10 or 11, we recommend that you install Docker Desktop instead.

Binary packages on Windows include both dockerd.exe and docker.exe. On Windows, these binaries only provide the ability to run native Windows containers (not Linux containers).

To install server and client binaries, perform the following steps:

  1. Download the static binary archive. Go to https://download.docker.com/win/static/stable/x86_64 and select the latest version from the list.

  2. Run the following PowerShell commands to install and extract the archive to your program files:

     PS C:\> Expand-Archive /path/to/<FILE>.zip -DestinationPath $Env:ProgramFiles
    +
  3. Register the service and start the Docker Engine:

     PS C:\> &$Env:ProgramFiles\Docker\dockerd --register-service
    + PS C:\> Start-Service docker
    +
  4. Verify that Docker is installed correctly by running the hello-world image.

    PS C:\> &$Env:ProgramFiles\Docker\docker run hello-world:nanoserver
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Upgrade static binaries

To upgrade your manual installation of Docker Engine, first stop any dockerd or dockerd.exe processes running locally, then follow the regular installation steps to install the new version on top of the existing version.
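
On Linux, a rough sketch of that procedure looks like the following; the archive name is a placeholder, and the copy destination assumes the binaries were previously placed in /usr/bin/.

$ sudo kill "$(pidof dockerd)"        # stop the manually started daemon
$ tar xzvf /path/to/docker-<NEW_VERSION>.tgz
$ sudo cp docker/* /usr/bin/
$ sudo dockerd &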

Next steps

https://docs.docker.com/engine/install/binaries/

+
diff --git a/devdocs/docker/engine%2Finstall%2Fcentos%2Findex.html b/devdocs/docker/engine%2Finstall%2Fcentos%2Findex.html new file mode 100644 index 00000000..1a70363a --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Fcentos%2Findex.html @@ -0,0 +1,48 @@ +

Install Docker Engine on CentOS

+ +

To get started with Docker Engine on CentOS, make sure you meet the prerequisites, then install Docker.

Prerequisites

OS requirements

To install Docker Engine, you need a maintained version of CentOS 7, CentOS 8 (stream), or CentOS 9 (stream). Archived versions aren’t supported or tested.

The centos-extras repository must be enabled. This repository is enabled by default, but if you have disabled it, you need to re-enable it.

The overlay2 storage driver is recommended.

Uninstall old versions

Older versions of Docker were called docker or docker-engine. If these are installed, uninstall them, along with associated dependencies.

$ sudo yum remove docker \
+                  docker-client \
+                  docker-client-latest \
+                  docker-common \
+                  docker-latest \
+                  docker-latest-logrotate \
+                  docker-logrotate \
+                  docker-engine
+

It’s OK if yum reports that none of these packages are installed.

The contents of /var/lib/docker/, including images, containers, volumes, and networks, are preserved. The Docker Engine package is now called docker-ce.

Installation methods

You can install Docker Engine in different ways, depending on your needs: install from Docker’s repositories (the recommended approach), download a package and install it manually, or use the convenience script. Each method is described in the sections below.

Install using the repository

Before you install Docker Engine for the first time on a new host machine, you need to set up the Docker repository. Afterward, you can install and update Docker from the repository.

Set up the repository

Install the yum-utils package (which provides the yum-config-manager utility) and set up the repository.

$ sudo yum install -y yum-utils
+
+$ sudo yum-config-manager \
+    --add-repo \
+    https://download.docker.com/linux/centos/docker-ce.repo
+

Install Docker Engine

  1. Install the latest version of Docker Engine, containerd, and Docker Compose or go to the next step to install a specific version:

    $ sudo yum install docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +

    If prompted to accept the GPG key, verify that the fingerprint matches 060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35, and if so, accept it.

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  2. To install a specific version of Docker Engine, list the available versions in the repo, then select and install:

    a. List and sort the versions available in your repo. This example sorts results by version number, highest to lowest, and is truncated:

    $ yum list docker-ce --showduplicates | sort -r
    +
    +docker-ce.x86_64  3:18.09.1-3.el7                     docker-ce-stable
    +docker-ce.x86_64  3:18.09.0-3.el7                     docker-ce-stable
    +docker-ce.x86_64  18.06.1.ce-3.el7                    docker-ce-stable
    +docker-ce.x86_64  18.06.0.ce-3.el7                    docker-ce-stable
    +

    The list returned depends on which repositories are enabled, and is specific to your version of CentOS (indicated by the .el7 suffix in this example).

    b. Install a specific version by its fully qualified package name, which is the package name (docker-ce) plus the version string (2nd column) starting after the first colon (:) and up to the first hyphen, separated by a hyphen (-). For example, docker-ce-18.09.1.

    $ sudo yum install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io docker-compose-plugin
    +

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  3. Start Docker.

    $ sudo systemctl start docker
    +
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Linux postinstall to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, follow the installation instructions, choosing the new version you want to install.

Install from a package

If you cannot use Docker’s repository to install Docker, you can download the .rpm file for your release and install it manually. You need to download a new file each time you want to upgrade Docker Engine.

  1. Go to https://download.docker.com/linux/centos/ and choose your version of CentOS. Then browse to x86_64/stable/Packages/ and download the .rpm file for the Docker version you want to install.

  2. Install Docker Engine, changing the path below to the path where you downloaded the Docker package.

    $ sudo yum install /path/to/package.rpm
    +

    Docker is installed but not started. The docker group is created, but no users are added to the group.

  3. Start Docker.

    $ sudo systemctl start docker
    +
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Post-installation steps for Linux to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, download the newer package file and repeat the installation procedure, using yum -y upgrade instead of yum -y install, and point to the new file.
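
For example, with the downloaded package path as a placeholder:

$ sudo yum -y upgrade /path/to/package.rpm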

Install using the convenience script

Docker provides a convenience script at get.docker.com to install Docker into development environments quickly and non-interactively. The convenience script is not recommended for production environments, but can be used as an example to create a provisioning script that is tailored to your needs. Also refer to the install using the repository steps to learn about installation steps to install using the package repository. The source code for the script is open source, and can be found in the docker-install repository on GitHub.

Always examine scripts downloaded from the internet before running them locally. Before installing, make yourself familiar with potential risks and limitations of the convenience script:

Tip: preview script steps before running

You can run the script with the DRY_RUN=1 option to learn what steps the script will execute during installation:

$ curl -fsSL https://get.docker.com -o get-docker.sh
+$ DRY_RUN=1 sh ./get-docker.sh
+

This example downloads the script from get.docker.com and runs it to install the latest stable release of Docker on Linux:

$ curl -fsSL https://get.docker.com -o get-docker.sh
+$ sudo sh get-docker.sh
+Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737
+<...>
+

Docker is installed. The docker service starts automatically on Debian-based distributions. On RPM-based distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it manually using the appropriate systemctl or service command. As the message indicates, non-root users cannot run Docker commands by default.

Use Docker as a non-privileged user, or install in rootless mode?

The installation script requires root or sudo privileges to install and use Docker. If you want to grant non-root users access to Docker, refer to the post-installation steps for Linux. Docker can also be installed without root privileges, or configured to run in rootless mode. For instructions on running Docker in rootless mode, refer to run the Docker daemon as a non-root user (rootless mode).

Install pre-releases

Docker also provides a convenience script at test.docker.com to install pre-releases of Docker on Linux. This script is equivalent to the script at get.docker.com, but configures your package manager to enable the “test” channel from our package repository, which includes both stable and pre-releases (beta versions, release-candidates) of Docker. Use this script to get early access to new releases, and to evaluate them in a testing environment before they are released as stable.

To install the latest version of Docker on Linux from the “test” channel, run:

$ curl -fsSL https://test.docker.com -o test-docker.sh
+$ sudo sh test-docker.sh
+<...>
+

Upgrade Docker after using the convenience script

If you installed Docker using the convenience script, you should upgrade Docker using your package manager directly. There is no advantage to re-running the convenience script, and it can cause issues if it attempts to re-add repositories which have already been added to the host machine.

Uninstall Docker Engine

  1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:

    $ sudo yum remove docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +
  2. Images, containers, volumes, or customized configuration files on your host are not automatically removed. To delete all images, containers, and volumes:

    $ sudo rm -rf /var/lib/docker
    +$ sudo rm -rf /var/lib/containerd
    +

You must delete any edited configuration files manually.

Next steps

https://docs.docker.com/engine/install/centos/

+
diff --git a/devdocs/docker/engine%2Finstall%2Fdebian%2Findex.html b/devdocs/docker/engine%2Finstall%2Fdebian%2Findex.html new file mode 100644 index 00000000..6e8eef64 --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Fdebian%2Findex.html @@ -0,0 +1,47 @@ +

Install Docker Engine on Debian

+ +

To get started with Docker Engine on Debian, make sure you meet the prerequisites, then install Docker.

Prerequisites

OS requirements

To install Docker Engine, you need the 64-bit version of one of these Debian or Raspbian versions:

Docker Engine is supported on x86_64 (or amd64), armhf, and arm64 architectures.

Uninstall old versions

Older versions of Docker were called docker, docker.io, or docker-engine. If these are installed, uninstall them:

$ sudo apt-get remove docker docker-engine docker.io containerd runc
+

It’s OK if apt-get reports that none of these packages are installed.

The contents of /var/lib/docker/, including images, containers, volumes, and networks, are preserved. If you do not need to save your existing data, and want to start with a clean installation, refer to the uninstall Docker Engine section at the bottom of this page.

Installation methods

You can install Docker Engine in different ways, depending on your needs: install from Docker’s repositories (the recommended approach), download a package and install it manually, or use the convenience script. Each method is described in the sections below.

Install using the repository

Before you install Docker Engine for the first time on a new host machine, you need to set up the Docker repository. Afterward, you can install and update Docker from the repository.

Raspbian users cannot use this method!

For Raspbian, installing using the repository is not yet supported. You must instead use the convenience script.

Set up the repository

  1. Update the apt package index and install packages to allow apt to use a repository over HTTPS:

    $ sudo apt-get update
    +
    +$ sudo apt-get install \
    +    ca-certificates \
    +    curl \
    +    gnupg \
    +    lsb-release
    +
  2. Add Docker’s official GPG key:

    $ sudo mkdir -p /etc/apt/keyrings
    +$ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
    +
  3. Use the following command to set up the repository:

    $ echo \
    +  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
    +  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
    +

Install Docker Engine

This procedure works for Debian on x86_64 / amd64, armhf, arm64, and Raspbian.

  1. Update the apt package index, and install the latest version of Docker Engine, containerd, and Docker Compose, or go to the next step to install a specific version:

     $ sudo apt-get update
    + $ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +

    Receiving a GPG error when running apt-get update?

    Your default umask may not be set correctly, causing the public key file for the repo to not be detected. Run the following command and then try to update your repo again: sudo chmod a+r /etc/apt/keyrings/docker.gpg.

  2. To install a specific version of Docker Engine, list the available versions in the repo, then select and install:

    a. List the versions available in your repo:

    $ apt-cache madison docker-ce
    +
    +  docker-ce | 5:18.09.1~3-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
    +  docker-ce | 5:18.09.0~3-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
    +  docker-ce | 18.06.1~ce~3-0~debian        | https://download.docker.com/linux/debian stretch/stable amd64 Packages
    +  docker-ce | 18.06.0~ce~3-0~debian        | https://download.docker.com/linux/debian stretch/stable amd64 Packages
    +

    b. Install a specific version using the version string from the second column, for example, 5:18.09.1~3-0~debian-stretch.

    $ sudo apt-get install docker-ce=<VERSION_STRING> docker-ce-cli=<VERSION_STRING> containerd.io docker-compose-plugin
    +
  3. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Docker Engine is installed and running. The docker group is created but no users are added to it. You need to use sudo to run Docker commands. Continue to Linux postinstall to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, first run sudo apt-get update, then follow the installation instructions, choosing the new version you want to install.

Install from a package

If you cannot use Docker’s repository to install Docker Engine, you can download the .deb file for your release and install it manually. You need to download a new file each time you want to upgrade Docker.

  1. Go to https://download.docker.com/linux/debian/dists/, choose your Debian version, then browse to pool/stable/, choose amd64, armhf, or arm64, and download the .deb file for the Docker Engine version you want to install.

  2. Install Docker Engine, changing the path below to the path where you downloaded the Docker package.

    $ sudo dpkg -i /path/to/package.deb
    +

    The Docker daemon starts automatically.

  3. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Docker Engine is installed and running. The docker group is created but no users are added to it. You need to use sudo to run Docker commands. Continue to Post-installation steps for Linux to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, download the newer package file and repeat the installation procedure, pointing to the new file.
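
For example, with the downloaded package path as a placeholder:

$ sudo dpkg -i /path/to/newer-package.deb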

Install using the convenience script

Docker provides a convenience script at get.docker.com to install Docker into development environments quickly and non-interactively. The convenience script is not recommended for production environments, but can be used as an example to create a provisioning script that is tailored to your needs. Also refer to the install using the repository steps to learn about installation steps to install using the package repository. The source code for the script is open source, and can be found in the docker-install repository on GitHub.

Always examine scripts downloaded from the internet before running them locally. Before installing, make yourself familiar with potential risks and limitations of the convenience script:

Tip: preview script steps before running

You can run the script with the DRY_RUN=1 option to learn what steps the script will execute during installation:

$ curl -fsSL https://get.docker.com -o get-docker.sh
+$ DRY_RUN=1 sh ./get-docker.sh
+

This example downloads the script from get.docker.com and runs it to install the latest stable release of Docker on Linux:

$ curl -fsSL https://get.docker.com -o get-docker.sh
+$ sudo sh get-docker.sh
+Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737
+<...>
+

Docker is installed. The docker service starts automatically on Debian-based distributions. On RPM-based distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it manually using the appropriate systemctl or service command. As the message indicates, non-root users cannot run Docker commands by default.

Use Docker as a non-privileged user, or install in rootless mode?

The installation script requires root or sudo privileges to install and use Docker. If you want to grant non-root users access to Docker, refer to the post-installation steps for Linux. Docker can also be installed without root privileges, or configured to run in rootless mode. For instructions on running Docker in rootless mode, refer to run the Docker daemon as a non-root user (rootless mode).

Install pre-releases

Docker also provides a convenience script at test.docker.com to install pre-releases of Docker on Linux. This script is equivalent to the script at get.docker.com, but configures your package manager to enable the “test” channel from our package repository, which includes both stable and pre-releases (beta versions, release-candidates) of Docker. Use this script to get early access to new releases, and to evaluate them in a testing environment before they are released as stable.

To install the latest version of Docker on Linux from the “test” channel, run:

$ curl -fsSL https://test.docker.com -o test-docker.sh
+$ sudo sh test-docker.sh
+<...>
+

Upgrade Docker after using the convenience script

If you installed Docker using the convenience script, you should upgrade Docker using your package manager directly. There is no advantage to re-running the convenience script, and it can cause issues if it attempts to re-add repositories which have already been added to the host machine.

Uninstall Docker Engine

  1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:

    $ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +
  2. Images, containers, volumes, or customized configuration files on your host are not automatically removed. To delete all images, containers, and volumes:

    $ sudo rm -rf /var/lib/docker
    +$ sudo rm -rf /var/lib/containerd
    +

You must delete any edited configuration files manually.

Next steps

https://docs.docker.com/engine/install/debian/

+
diff --git a/devdocs/docker/engine%2Finstall%2Ffedora%2Findex.html b/devdocs/docker/engine%2Finstall%2Ffedora%2Findex.html new file mode 100644 index 00000000..c2f6e0db --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Ffedora%2Findex.html @@ -0,0 +1,50 @@ +

Install Docker Engine on Fedora

+ +

To get started with Docker Engine on Fedora, make sure you meet the prerequisites, then install Docker.

Prerequisites

OS requirements

To install Docker Engine, you need the 64-bit version of one of these Fedora versions:

Uninstall old versions

Older versions of Docker were called docker or docker-engine. If these are installed, uninstall them, along with associated dependencies.

$ sudo dnf remove docker \
+                  docker-client \
+                  docker-client-latest \
+                  docker-common \
+                  docker-latest \
+                  docker-latest-logrotate \
+                  docker-logrotate \
+                  docker-selinux \
+                  docker-engine-selinux \
+                  docker-engine
+

It’s OK if dnf reports that none of these packages are installed.

The contents of /var/lib/docker/, including images, containers, volumes, and networks, are preserved. The Docker Engine package is now called docker-ce.

Installation methods

You can install Docker Engine in different ways, depending on your needs: install from Docker’s repositories (the recommended approach), download a package and install it manually, or use the convenience script. Each method is described in the sections below.

Install using the repository

Before you install Docker Engine for the first time on a new host machine, you need to set up the Docker repository. Afterward, you can install and update Docker from the repository.

Set up the repository

Install the dnf-plugins-core package (which provides the commands to manage your DNF repositories) and set up the repository.

$ sudo dnf -y install dnf-plugins-core
+
+$ sudo dnf config-manager \
+    --add-repo \
+    https://download.docker.com/linux/fedora/docker-ce.repo
+

Install Docker Engine

  1. Install the latest version of Docker Engine, containerd, and Docker Compose or go to the next step to install a specific version:

    $ sudo dnf install docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +

    If prompted to accept the GPG key, verify that the fingerprint matches 060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35, and if so, accept it.

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  2. To install a specific version of Docker Engine, list the available versions in the repo, then select and install:

    a. List and sort the versions available in your repo. This example sorts results by version number, highest to lowest, and is truncated:

    $ dnf list docker-ce  --showduplicates | sort -r
    +
    +docker-ce.x86_64  3:18.09.1-3.fc28                 docker-ce-stable
    +docker-ce.x86_64  3:18.09.0-3.fc28                 docker-ce-stable
    +docker-ce.x86_64  18.06.1.ce-3.fc28                docker-ce-stable
    +docker-ce.x86_64  18.06.0.ce-3.fc28                docker-ce-stable
    +

    The list returned depends on which repositories are enabled, and is specific to your version of Fedora (indicated by the .fc28 suffix in this example).

    b. Install a specific version by its fully qualified package name, which is the package name (docker-ce) plus the version string (2nd column) up to the first hyphen, separated by a hyphen (-), for example, docker-ce-3:18.09.1.

    $ sudo dnf -y install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io docker-compose-plugin
    +

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  3. Start Docker.

    $ sudo systemctl start docker
    +
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Linux postinstall to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, follow the installation instructions, choosing the new version you want to install.

Install from a package

If you cannot use Docker’s repository to install Docker, you can download the .rpm file for your release and install it manually. You need to download a new file each time you want to upgrade Docker Engine.

  1. Go to https://download.docker.com/linux/fedora/ and choose your version of Fedora. Then browse to x86_64/stable/Packages/ and download the .rpm file for the Docker version you want to install.

  2. Install Docker Engine, changing the path below to the path where you downloaded the Docker package.

    $ sudo dnf -y install /path/to/package.rpm
    +

    Docker is installed but not started. The docker group is created, but no users are added to the group.

  3. Start Docker.

    $ sudo systemctl start docker
    +
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Post-installation steps for Linux to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, download the newer package file and repeat the installation procedure, using dnf -y upgrade instead of dnf -y install, and point to the new file.
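
For example, with the downloaded package path as a placeholder:

$ sudo dnf -y upgrade /path/to/package.rpm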

Install using the convenience script

Docker provides a convenience script at get.docker.com to install Docker into development environments quickly and non-interactively. The convenience script is not recommended for production environments, but can be used as an example to create a provisioning script that is tailored to your needs. Also refer to the install using the repository steps to learn about installation steps to install using the package repository. The source code for the script is open source, and can be found in the docker-install repository on GitHub.

Always examine scripts downloaded from the internet before running them locally. Before installing, make yourself familiar with potential risks and limitations of the convenience script:

Tip: preview script steps before running

You can run the script with the DRY_RUN=1 option to learn what steps the script will execute during installation:

$ curl -fsSL https://get.docker.com -o get-docker.sh
+$ DRY_RUN=1 sh ./get-docker.sh
+

This example downloads the script from get.docker.com and runs it to install the latest stable release of Docker on Linux:

$ curl -fsSL https://get.docker.com -o get-docker.sh
+$ sudo sh get-docker.sh
+Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737
+<...>
+

Docker is installed. The docker service starts automatically on Debian-based distributions. On RPM-based distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it manually using the appropriate systemctl or service command. As the message indicates, non-root users cannot run Docker commands by default.

Use Docker as a non-privileged user, or install in rootless mode?

The installation script requires root or sudo privileges to install and use Docker. If you want to grant non-root users access to Docker, refer to the post-installation steps for Linux. Docker can also be installed without root privileges, or configured to run in rootless mode. For instructions on running Docker in rootless mode, refer to run the Docker daemon as a non-root user (rootless mode).

Install pre-releases

Docker also provides a convenience script at test.docker.com to install pre-releases of Docker on Linux. This script is equivalent to the script at get.docker.com, but configures your package manager to enable the “test” channel from our package repository, which includes both stable and pre-releases (beta versions, release-candidates) of Docker. Use this script to get early access to new releases, and to evaluate them in a testing environment before they are released as stable.

To install the latest version of Docker on Linux from the “test” channel, run:

$ curl -fsSL https://test.docker.com -o test-docker.sh
+$ sudo sh test-docker.sh
+<...>
+

Upgrade Docker after using the convenience script

If you installed Docker using the convenience script, you should upgrade Docker using your package manager directly. There is no advantage to re-running the convenience script, and it can cause issues if it attempts to re-add repositories which have already been added to the host machine.

Uninstall Docker Engine

  1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:

    $ sudo dnf remove docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +
  2. Images, containers, volumes, or customized configuration files on your host are not automatically removed. To delete all images, containers, and volumes:

    $ sudo rm -rf /var/lib/docker
    +$ sudo rm -rf /var/lib/containerd
    +

You must delete any edited configuration files manually.

Next steps

https://docs.docker.com/engine/install/fedora/

+
diff --git a/devdocs/docker/engine%2Finstall%2Findex.html b/devdocs/docker/engine%2Finstall%2Findex.html new file mode 100644 index 00000000..246d0d39 --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Findex.html @@ -0,0 +1,10 @@ +

Install Docker Engine

+ +

Docker Desktop for Linux

Docker Desktop helps you build, share, and run containers easily on Mac and Windows as you do on Linux. We are excited to share that Docker Desktop for Linux is now GA. For more information, see Docker Desktop for Linux.

Supported platforms

Docker Engine is available on a variety of Linux platforms, macOS and Windows 10 through Docker Desktop, and as a static binary installation. Find your preferred operating system below.

Desktop

Platform                          x86_64 / amd64    arm64 (Apple Silicon)
Docker Desktop for Linux          yes               -
Docker Desktop for Mac (macOS)    yes               yes
Docker Desktop for Windows        yes               -

Server

Docker provides .deb and .rpm packages from the following Linux distributions and architectures:

Platform    x86_64 / amd64    arm64 / aarch64    arm (32-bit)    s390x
CentOS      yes               yes                -               -
Debian      yes               yes                yes             -
Fedora      yes               yes                -               -
Raspbian    -                 -                  yes             -
RHEL        -                 -                  -               yes
SLES        -                 -                  -               yes
Ubuntu      yes               yes                yes             yes
Binaries    yes               yes                yes             -

Other Linux distributions

Note

While the instructions below may work, Docker does not test or verify installation on derivatives.

Docker provides binaries for manual installation of Docker Engine. These binaries are statically linked and can be used on any Linux distribution.

Release channels

Docker Engine has two types of update channels, stable and test:

Stable

Year-month releases are made from a release branch diverged from the master branch. The branch is created with format <year>.<month>, for example 20.10. The year-month name indicates the earliest possible calendar month to expect the release to be generally available. All further patch releases are performed from that branch. For example, once v20.10.0 is released, all subsequent patch releases are built from the 20.10 branch.

Test

In preparation for a new year-month release, a branch is created from the master branch with the format YY.mm when the milestones desired by Docker for the release are feature-complete. Pre-releases such as betas and release candidates are conducted from their respective release branches. Patch releases and the corresponding pre-releases are performed from within the corresponding release branch.

Support

Docker Engine releases of a year-month branch are supported with patches as needed for one month after the next year-month general availability release.

This means bug reports and backports to release branches are assessed until the end-of-life date.

After the year-month branch has reached end-of-life, the branch may be deleted from the repository.

Backporting

Backports to the Docker products are prioritized by the Docker company. A Docker employee or repository maintainer will endeavour to ensure sensible bugfixes make it into active releases.

If there are important fixes that ought to be considered for backport to active release branches, be sure to highlight this in the PR description or by adding a comment to the PR.

Upgrade path

Patch releases are always backward compatible with their year-month version.

Licensing

Docker is licensed under the Apache License, Version 2.0. See LICENSE for the full license text.

Reporting security issues

The Docker maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!

Please DO NOT file a public issue; instead send your report privately to security@docker.com.

Security reports are greatly appreciated, and Docker will publicly thank you for it.

Get started

After setting up Docker, you can learn the basics with Getting started with Docker.

https://docs.docker.com/engine/install/

+
diff --git a/devdocs/docker/engine%2Finstall%2Flinux-postinstall%2Findex.html b/devdocs/docker/engine%2Finstall%2Flinux-postinstall%2Findex.html new file mode 100644 index 00000000..d03224c8 --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Flinux-postinstall%2Findex.html @@ -0,0 +1,76 @@ +

Post-installation steps for Linux

+ +

This section contains optional procedures for configuring Linux hosts to work better with Docker.

Manage Docker as a non-root user

The Docker daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user root and other users can only access it using sudo. The Docker daemon always runs as the root user.

If you don’t want to preface the docker command with sudo, create a Unix group called docker and add users to it. When the Docker daemon starts, it creates a Unix socket accessible by members of the docker group.

Warning

The docker group grants privileges equivalent to the root user. For details on how this impacts security in your system, see Docker Daemon Attack Surface.

Note:

To run Docker without root privileges, see Run the Docker daemon as a non-root user (Rootless mode).

To create the docker group and add your user:

  1. Create the docker group.

    $ sudo groupadd docker
    +
  2. Add your user to the docker group.

    $ sudo usermod -aG docker $USER
    +
  3. Log out and log back in so that your group membership is re-evaluated.

    If testing on a virtual machine, it may be necessary to restart the virtual machine for changes to take effect.

    On a desktop Linux environment such as X Windows, log out of your session completely and then log back in.

    On Linux, you can also run the following command to activate the changes to groups:

    $ newgrp docker 
    +
  4. Verify that you can run docker commands without sudo.

    $ docker run hello-world
    +

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

    If you initially ran Docker CLI commands using sudo before adding your user to the docker group, you may see the following error, which indicates that your ~/.docker/ directory was created with incorrect permissions due to the sudo commands.

    WARNING: Error loading config file: /home/user/.docker/config.json -
    +stat /home/user/.docker/config.json: permission denied
    +

    To fix this problem, either remove the ~/.docker/ directory (it is recreated automatically, but any custom settings are lost), or change its ownership and permissions using the following commands:

    $ sudo chown "$USER":"$USER" /home/"$USER"/.docker -R
    +$ sudo chmod g+rwx "$HOME/.docker" -R
    +

Configure Docker to start on boot

Most current Linux distributions (RHEL, CentOS, Fedora, Debian, Ubuntu 16.04 and higher) use systemd to manage which services start when the system boots. On Debian and Ubuntu, the Docker service is configured to start on boot by default. To automatically start Docker and Containerd on boot for other distros, use the commands below:

$ sudo systemctl enable docker.service
+$ sudo systemctl enable containerd.service
+

To disable this behavior, use disable instead.

$ sudo systemctl disable docker.service
+$ sudo systemctl disable containerd.service
+

If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, see customize your systemd Docker daemon options.
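
For instance, a common customization is routing the daemon through an HTTP proxy with a systemd drop-in. A minimal sketch, assuming a proxy at proxy.example.com:3128, is shown below; save it as /etc/systemd/system/docker.service.d/http-proxy.conf, then run sudo systemctl daemon-reload and sudo systemctl restart docker.

[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"
Environment="HTTPS_PROXY=http://proxy.example.com:3128"
Environment="NO_PROXY=localhost,127.0.0.1"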

Use a different storage engine

For information about the different storage engines, see Storage drivers. The default storage engine and the list of supported storage engines depend on your host’s Linux distribution and available kernel drivers.

Configure default logging driver

Docker provides the capability to collect and view log data from all containers running on a host via a series of logging drivers. The default logging driver, json-file, writes log data to JSON-formatted files on the host filesystem. Over time, these log files expand in size, leading to potential exhaustion of disk resources.

To alleviate such issues, either configure the json-file logging driver to enable log rotation, use an alternative logging driver such as the “local” logging driver that performs log rotation by default, or use a logging driver that sends logs to a remote logging aggregator.
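
A minimal /etc/docker/daemon.json sketch that keeps the default json-file driver but enables rotation might look like this; the size and file-count values are arbitrary examples, and the daemon must be restarted for the change to apply to new containers.

{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}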

Configure where the Docker daemon listens for connections

By default, the Docker daemon listens for connections on a UNIX socket to accept requests from local clients. It is possible to allow Docker to accept requests from remote hosts by configuring it to listen on an IP address and port as well as the UNIX socket. For more detailed information on this configuration option take a look at “Bind Docker to another host/port or a unix socket” section of the Docker CLI Reference article.

Secure your connection

Before configuring Docker to accept connections from remote hosts it is critically important that you understand the security implications of opening docker to the network. If steps are not taken to secure the connection, it is possible for remote non-root users to gain root access on the host. For more information on how to use TLS certificates to secure this connection, check this article on how to protect the Docker daemon socket.
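
For reference, exposing the daemon over TCP with TLS verification typically combines flags along the following lines; the certificate file names are placeholders, and the article mentioned above describes how to generate them.

$ sudo dockerd \
    --tlsverify \
    --tlscacert=ca.pem \
    --tlscert=server-cert.pem \
    --tlskey=server-key.pem \
    -H tcp://0.0.0.0:2376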

Configuring Docker to accept remote connections can be done with the docker.service systemd unit file for Linux distributions using systemd, such as recent versions of RedHat, CentOS, Ubuntu and SLES, or with the daemon.json file which is recommended for Linux distributions that do not use systemd.

systemd vs daemon.json

Configuring Docker to listen for connections using both the systemd unit file and the daemon.json file causes a conflict that prevents Docker from starting.

Configuring remote access with systemd unit file

  1. Use the command sudo systemctl edit docker.service to open an override file for docker.service in a text editor.

  2. Add or modify the following lines, substituting your own values.

    [Service]
    +ExecStart=
    +ExecStart=/usr/bin/dockerd -H fd:// -H tcp://127.0.0.1:2375
    +
  3. Save the file.

  4. Reload the systemctl configuration.

     $ sudo systemctl daemon-reload
    +
  5. Restart Docker.

    $ sudo systemctl restart docker.service
    +
  6. Check to see whether the change was honored by reviewing the output of netstat to confirm dockerd is listening on the configured port.

    $ sudo netstat -lntp | grep dockerd
    +tcp        0      0 127.0.0.1:2375          0.0.0.0:*               LISTEN      3758/dockerd
    +

Configuring remote access with daemon.json

  1. Set the hosts array in the /etc/docker/daemon.json to connect to the UNIX socket and an IP address, as follows:

    {
    +  "hosts": ["unix:///var/run/docker.sock", "tcp://127.0.0.1:2375"]
    +}
    +
  2. Restart Docker.

  3. Check to see whether the change was honored by reviewing the output of netstat to confirm dockerd is listening on the configured port.

    $ sudo netstat -lntp | grep dockerd
    +tcp        0      0 127.0.0.1:2375          0.0.0.0:*               LISTEN      3758/dockerd
    +

Enable IPv6 on the Docker daemon

To enable IPv6 on the Docker daemon, see Enable IPv6 support.

Troubleshooting

Kernel compatibility

Docker cannot run correctly if your kernel is older than version 3.10 or if it is missing some modules. To check kernel compatibility, you can download and run the check-config.sh script.

$ curl https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh > check-config.sh
+
+$ bash ./check-config.sh
+

The script only works on Linux, not macOS.

Cannot connect to the Docker daemon

If you see an error such as the following, your Docker client may be configured to connect to a Docker daemon on a different host, and that host may not be reachable.

Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?
+

To see which host your client is configured to connect to, check the value of the DOCKER_HOST variable in your environment.

$ env | grep DOCKER_HOST
+

If this command returns a value, the Docker client is set to connect to a Docker daemon running on that host. If it is unset, the Docker client is set to connect to the Docker daemon running on the local host. If it is set in error, use the following command to unset it:

$ unset DOCKER_HOST
+

You may need to edit your environment in files such as ~/.bashrc or ~/.profile to prevent the DOCKER_HOST variable from being set erroneously.

If DOCKER_HOST is set as intended, verify that the Docker daemon is running on the remote host and that a firewall or network outage is not preventing you from connecting.
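
One quick check is to request version information from the configured endpoint, and then from the local socket explicitly:

$ docker version                                  # uses DOCKER_HOST if it is set
$ docker -H unix:///var/run/docker.sock version   # talks to the local daemon directly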

IP forwarding problems

If you manually configure your network using systemd-network with systemd version 219 or higher, Docker containers may not be able to access your network. Beginning with systemd version 220, the forwarding setting for a given network (net.ipv4.conf.<interface>.forwarding) defaults to off. This setting prevents IP forwarding. It also conflicts with Docker’s behavior of enabling the net.ipv4.conf.all.forwarding setting within containers.

To work around this on RHEL, CentOS, or Fedora, edit the <interface>.network file in /usr/lib/systemd/network/ on your Docker host (ex: /usr/lib/systemd/network/80-container-host0.network) and add the following block within the [Network] section.

[Network]
+...
+IPForward=kernel
+# OR
+IPForward=true
+

This configuration allows IP forwarding from the container as expected.

DNS resolver found in resolv.conf and containers can't use it

Linux systems which use a GUI often have a network manager running, which uses a dnsmasq instance running on a loopback address such as 127.0.0.1 or 127.0.1.1 to cache DNS requests, and adds this entry to /etc/resolv.conf. The dnsmasq service speeds up DNS look-ups and also provides DHCP services. This configuration does not work within a Docker container which has its own network namespace, because the Docker container resolves loopback addresses such as 127.0.0.1 to itself, and it is very unlikely to be running a DNS server on its own loopback address.

If Docker detects that no DNS server referenced in /etc/resolv.conf is a fully functional DNS server, the following warning occurs and Docker uses the public DNS servers provided by Google at 8.8.8.8 and 8.8.4.4 for DNS resolution.

WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers
+can't use it. Using default external servers : [8.8.8.8 8.8.4.4]
+

If you see this warning, first check to see if you use dnsmasq:

$ ps aux |grep dnsmasq
+

If your container needs to resolve hosts which are internal to your network, the public nameservers are not adequate. You have two choices: specify DNS servers for Docker to use, or disable dnsmasq in NetworkManager. The following sections describe both.

You only need to use one of these methods.

Specify DNS servers for Docker

The default location of the configuration file is /etc/docker/daemon.json. You can change the location of the configuration file using the --config-file daemon flag. The documentation below assumes the configuration file is located at /etc/docker/daemon.json.

  1. Create or edit the Docker daemon configuration file, which defaults to /etc/docker/daemon.json and controls the Docker daemon configuration.

    $ sudo nano /etc/docker/daemon.json
    +
  2. Add a dns key with one or more IP addresses as values. If the file has existing contents, you only need to add or edit the dns line.

    {
    +  "dns": ["8.8.8.8", "8.8.4.4"]
    +}
    +

    If your internal DNS server cannot resolve public IP addresses, include at least one DNS server which can, so that you can connect to Docker Hub and so that your containers can resolve internet domain names.

    Save and close the file.

  3. Restart the Docker daemon.

    $ sudo service docker restart
    +
  4. Verify that Docker can resolve external IP addresses by trying to pull an image:

    $ docker pull hello-world
    +
  5. If necessary, verify that Docker containers can resolve an internal hostname by pinging it.

    $ docker run --rm -it alpine ping -c4 <my_internal_host>
    +
    +PING google.com (192.168.1.2): 56 data bytes
    +64 bytes from 192.168.1.2: seq=0 ttl=41 time=7.597 ms
    +64 bytes from 192.168.1.2: seq=1 ttl=41 time=7.635 ms
    +64 bytes from 192.168.1.2: seq=2 ttl=41 time=7.660 ms
    +64 bytes from 192.168.1.2: seq=3 ttl=41 time=7.677 ms
    +

Disable dnsmasq

Ubuntu

If you prefer not to change the Docker daemon’s configuration to use a specific IP address, follow these instructions to disable dnsmasq in NetworkManager.

  1. Edit the /etc/NetworkManager/NetworkManager.conf file.

  2. Comment out the dns=dnsmasq line by adding a # character to the beginning of the line.

    # dns=dnsmasq
    +

    Save and close the file.

  3. Restart both NetworkManager and Docker. As an alternative, you can reboot your system.

    $ sudo systemctl restart network-manager
    +$ sudo systemctl restart docker
    +
RHEL, CentOS, or Fedora

To disable dnsmasq on RHEL, CentOS, or Fedora:

  1. Disable the dnsmasq service:

    $ sudo systemctl stop dnsmasq
    +$ sudo systemctl disable dnsmasq
    +
  2. Configure the DNS servers manually using the Red Hat documentation.

Allow access to the remote API through a firewall

If you run a firewall on the same host as Docker, remote access is enabled, and you want to access the Docker Remote API from another host, you need to configure your firewall to allow incoming connections on the Docker port, which defaults to 2376 if TLS-encrypted transport is enabled, or 2375 otherwise.

Two common firewall daemons are UFW (Uncomplicated Firewall) (often used for Ubuntu systems) and firewalld (often used for RPM-based systems). Consult the documentation for your OS and firewall, but the following information might help you get started. These options are fairly permissive and you may want to use a different configuration that locks your system down more.
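
As permissive starting points only (tighten the allowed source addresses for production), opening the TLS port looks like this with UFW and with firewalld respectively:

$ sudo ufw allow 2376/tcp
$ sudo firewall-cmd --permanent --add-port=2376/tcp
$ sudo firewall-cmd --reload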

Your kernel does not support cgroup swap limit capabilities

On Ubuntu or Debian hosts, you may see messages similar to the following when working with an image.

WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.
+

This warning does not occur on RPM-based systems, which enable these capabilities by default.

If you don’t need these capabilities, you can ignore the warning. You can enable these capabilities on Ubuntu or Debian by following these instructions. Memory and swap accounting incur an overhead of about 1% of the total available memory and a 10% overall performance degradation, even if Docker is not running.

  1. Log into the Ubuntu or Debian host as a user with sudo privileges.

  2. Edit the /etc/default/grub file. Add or edit the GRUB_CMDLINE_LINUX line to add the following two key-value pairs:

    GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"

    Save and close the file.

  3. Update GRUB.

    $ sudo update-grub

    If your GRUB configuration file has incorrect syntax, an error occurs. In this case, repeat steps 2 and 3.

    The changes take effect when the system is rebooted.

Next steps


https://docs.docker.com/engine/install/linux-postinstall/
diff --git a/devdocs/docker/engine%2Finstall%2Frhel%2Findex.html b/devdocs/docker/engine%2Finstall%2Frhel%2Findex.html new file mode 100644 index 00000000..d827a5df --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Frhel%2Findex.html @@ -0,0 +1,49 @@ +

Install Docker Engine on RHEL


To get started with Docker Engine on RHEL, make sure you meet the prerequisites, then install Docker.

Prerequisites

Note

We currently only provide packages for RHEL on s390x (IBM Z). Other architectures are not yet supported for RHEL, but you may be able to install the CentOS packages on RHEL. Refer to the Install Docker Engine on CentOS page for details.

OS requirements

To install Docker Engine, you need a maintained version of RHEL 7 or 8 on s390x (IBM Z). Archived versions aren’t supported or tested.

The overlay2 storage driver is recommended.

Uninstall old versions

Older versions of Docker were called docker or docker-engine. If these are installed, uninstall them, along with associated dependencies. Also uninstall Podman and the associated dependencies if installed already.

$ sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine \
                  podman \
                  runc

It’s OK if yum reports that none of these packages are installed.

The contents of /var/lib/docker/, including images, containers, volumes, and networks, are preserved. The Docker Engine package is now called docker-ce.

Installation methods

You can install Docker Engine in different ways, depending on your needs: set up Docker’s repositories and install from them (the recommended approach), download the RPM package and install it manually, or use the convenience script in testing and development environments. Each method is covered in the sections below.

Install using the repository

Before you install Docker Engine for the first time on a new host machine, you need to set up the Docker repository. Afterward, you can install and update Docker from the repository.

Set up the repository

Install the yum-utils package (which provides the yum-config-manager utility) and set up the repository.

$ sudo yum install -y yum-utils

$ sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/rhel/docker-ce.repo

Install Docker Engine

  1. Install the latest version of Docker Engine, containerd, and Docker Compose or go to the next step to install a specific version:

    $ sudo yum install docker-ce docker-ce-cli containerd.io docker-compose-plugin

    If prompted to accept the GPG key, verify that the fingerprint matches 060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35, and if so, accept it.

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  2. To install a specific version of Docker Engine, list the available versions in the repo, then select and install:

    a. List and sort the versions available in your repo. This example sorts results by version number, highest to lowest, and is truncated:

    $ yum list docker-ce --showduplicates | sort -r

    docker-ce.s390x                3:20.10.8-3.el8                 docker-ce-stable
    docker-ce.s390x                3:20.10.7-3.el8                 docker-ce-stable
    <...>

    The list returned depends on which repositories are enabled, and is specific to your version of RHEL (indicated by the .el8 suffix in this example).

    b. Install a specific version by its fully qualified package name, which is the package name (docker-ce) plus the version string (2nd column) starting at the first colon (:), up to the first hyphen, separated by a hyphen (-). For example, docker-ce-20.10.7.

    $ sudo yum install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io docker-compose-plugin

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  3. Start Docker.

    $ sudo systemctl start docker
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Linux postinstall to allow non-privileged users to run Docker commands and for other optional configuration steps.
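If you later want to run docker commands without sudo, the relevant post-installation step boils down to adding your user to the docker group and logging out and back in; a minimal sketch (note that membership in the docker group grants root-equivalent privileges on the host):

$ sudo usermod -aG docker $USER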

Upgrade Docker Engine

To upgrade Docker Engine, follow the installation instructions, choosing the new version you want to install.

Install from a package

If you cannot use Docker’s repository to install Docker, you can download the .rpm file for your release and install it manually. You need to download a new file each time you want to upgrade Docker Engine.

  1. Go to https://download.docker.com/linux/rhel/ and choose your version of RHEL. Then browse to s390x/stable/Packages/ and download the .rpm file for the Docker version you want to install.

  2. Install Docker Engine, changing the path below to the path where you downloaded the Docker package.

    $ sudo yum install /path/to/package.rpm

    Docker is installed but not started. The docker group is created, but no users are added to the group.

  3. Start Docker.

    $ sudo systemctl start docker
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Post-installation steps for Linux to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, download the newer package file and repeat the installation procedure, using yum -y upgrade instead of yum -y install, and point to the new file.
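For example, a command of roughly the following shape, with the placeholder path pointing at the newly downloaded package:

$ sudo yum -y upgrade /path/to/package.rpm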

Install using the convenience script

Docker provides a convenience script at get.docker.com to install Docker into development environments quickly and non-interactively. The convenience script is not recommended for production environments, but can be used as an example to create a provisioning script that is tailored to your needs. Also refer to the install using the repository steps to learn about installation steps to install using the package repository. The source code for the script is open source, and can be found in the docker-install repository on GitHub.

Always examine scripts downloaded from the internet before running them locally. Before installing, make yourself familiar with potential risks and limitations of the convenience script:

Tip: preview script steps before running

You can run the script with the DRY_RUN=1 option to learn what steps the script will execute during installation:

$ curl -fsSL https://get.docker.com -o get-docker.sh
$ DRY_RUN=1 sh ./get-docker.sh

This example downloads the script from get.docker.com and runs it to install the latest stable release of Docker on Linux:

$ curl -fsSL https://get.docker.com -o get-docker.sh
$ sudo sh get-docker.sh
Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737
<...>

Docker is installed. The docker service starts automatically on Debian based distributions. On RPM based distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it manually using the appropriate systemctl or service command. As the message indicates, non-root users cannot run Docker commands by default.

Use Docker as a non-privileged user, or install in rootless mode?

The installation script requires root or sudo privileges to install and use Docker. If you want to grant non-root users access to Docker, refer to the post-installation steps for Linux. Docker can also be installed without root privileges, or configured to run in rootless mode. For instructions on running Docker in rootless mode, refer to run the Docker daemon as a non-root user (rootless mode).

Install pre-releases

Docker also provides a convenience script at test.docker.com to install pre-releases of Docker on Linux. This script is equivalent to the script at get.docker.com, but configures your package manager to enable the “test” channel from our package repository, which includes both stable and pre-releases (beta versions, release-candidates) of Docker. Use this script to get early access to new releases, and to evaluate them in a testing environment before they are released as stable.

To install the latest version of Docker on Linux from the “test” channel, run:

$ curl -fsSL https://test.docker.com -o test-docker.sh
$ sudo sh test-docker.sh
<...>

Upgrade Docker after using the convenience script

If you installed Docker using the convenience script, you should upgrade Docker using your package manager directly. There is no advantage to re-running the convenience script, and it can cause issues if it attempts to re-add repositories which have already been added to the host machine.
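On RHEL, for example, upgrading through the package manager directly might look like this:

$ sudo yum upgrade docker-ce docker-ce-cli containerd.io docker-compose-plugin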

Uninstall Docker Engine

  1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:

    $ sudo yum remove docker-ce docker-ce-cli containerd.io docker-compose-plugin
  2. Images, containers, volumes, or customized configuration files on your host are not automatically removed. To delete all images, containers, and volumes:

    $ sudo rm -rf /var/lib/docker
    $ sudo rm -rf /var/lib/containerd

You must delete any edited configuration files manually.

Next steps


https://docs.docker.com/engine/install/rhel/
diff --git a/devdocs/docker/engine%2Finstall%2Fsles%2Findex.html b/devdocs/docker/engine%2Finstall%2Fsles%2Findex.html new file mode 100644 index 00000000..facc0ebb --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Fsles%2Findex.html @@ -0,0 +1,46 @@ +

Install Docker Engine on SLES


To get started with Docker Engine on SLES, make sure you meet the prerequisites, then install Docker.

Prerequisites

Note

We currently only provide packages for SLES on s390x (IBM Z). Other architectures are not yet supported for SLES.

OS requirements

To install Docker Engine, you need a maintained version of SLES 15-SP2 or SLES 15-SP3 on s390x (IBM Z). Archived versions aren’t supported or tested.

The SCC SUSE repositories must be enabled.

The OpenSUSE SELinux repository must be enabled. This repository is not added by default, and you need to enable it for the version of SLES you are running. Run the following commands to add it:

$ sles_version="$(. /etc/os-release && echo "${VERSION_ID##*.}")"
$ opensuse_repo="https://download.opensuse.org/repositories/security:SELinux/SLE_15_SP$sles_version/security:SELinux.repo"
$ sudo zypper addrepo $opensuse_repo

The overlay2 storage driver is recommended.

Uninstall old versions

Older versions of Docker were called docker or docker-engine. If these are installed, uninstall them, along with associated dependencies.

$ sudo zypper remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine \
                  runc

It’s OK if zypper reports that none of these packages are installed.

The contents of /var/lib/docker/, including images, containers, volumes, and networks, are preserved. The Docker Engine package is now called docker-ce.

Installation methods

You can install Docker Engine in different ways, depending on your needs: set up Docker’s repositories and install from them (the recommended approach), download the RPM package and install it manually, or use the convenience script in testing and development environments. Each method is covered in the sections below.

Install using the repository

Before you install Docker Engine for the first time on a new host machine, you need to set up the Docker repository. Afterward, you can install and update Docker from the repository.

Set up the repository

Set up the repository.

$ sudo zypper addrepo https://download.docker.com/linux/sles/docker-ce.repo

Install Docker Engine

  1. Install the latest version of Docker Engine, containerd, and Docker Compose or go to the next step to install a specific version:

    $ sudo zypper install docker-ce docker-ce-cli containerd.io docker-compose-plugin

    If prompted to accept the GPG key, verify that the fingerprint matches 060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35, and if so, accept it.

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  2. To install a specific version of Docker Engine, list the available versions in the repo, then select and install:

    a. List and sort the versions available in your repo. This example sorts results by version number, highest to lowest, and is truncated:

    $ sudo zypper search -s --match-exact docker-ce | sort -r

      v  | docker-ce | package | 3:20.10.8-3 | s390x | Docker CE Stable - s390x
      v  | docker-ce | package | 3:20.10.7-3 | s390x | Docker CE Stable - s390x

    The list returned depends on which repositories are enabled, and is specific to your version of SLES.

    b. Install a specific version by its fully qualified package name, which is the package name (docker-ce) plus the version string (fourth column), separated by a hyphen (-). For example, docker-ce-3:20.10.8.

    $ sudo zypper install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io docker-compose-plugin

    This command installs Docker, but it doesn’t start Docker. It also creates a docker group, however, it doesn’t add any users to the group by default.

  3. Start Docker.

    $ sudo systemctl start docker
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Linux postinstall to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, follow the installation instructions, choosing the new version you want to install.

Install from a package

If you cannot use Docker’s repository to install Docker, you can download the .rpm file for your release and install it manually. You need to download a new file each time you want to upgrade Docker Engine.

  1. Go to https://download.docker.com/linux/sles/ and choose your version of SLES. Then browse to 15/s390x/stable/Packages/ and download the .rpm file for the Docker version you want to install.

  2. Install Docker Engine, changing the path below to the path where you downloaded the Docker package.

    $ sudo zypper install /path/to/package.rpm

    Docker is installed but not started. The docker group is created, but no users are added to the group.

  3. Start Docker.

    $ sudo systemctl start docker
  4. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

This installs and runs Docker Engine. Use sudo to run Docker commands. Continue to Post-installation steps for Linux to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, download the newer package file and repeat the installation procedure, using zypper -y upgrade instead of zypper -y install, and point to the new file.

Install using the convenience script

Docker provides a convenience script at get.docker.com to install Docker into development environments quickly and non-interactively. The convenience script is not recommended for production environments, but can be used as an example to create a provisioning script that is tailored to your needs. Also refer to the install using the repository steps to learn about installation steps to install using the package repository. The source code for the script is open source, and can be found in the docker-install repository on GitHub.

Always examine scripts downloaded from the internet before running them locally. Before installing, make yourself familiar with potential risks and limitations of the convenience script:

Tip: preview script steps before running

You can run the script with the DRY_RUN=1 option to learn what steps the script will execute during installation:

$ curl -fsSL https://get.docker.com -o get-docker.sh
$ DRY_RUN=1 sh ./get-docker.sh

This example downloads the script from get.docker.com and runs it to install the latest stable release of Docker on Linux:

$ curl -fsSL https://get.docker.com -o get-docker.sh
$ sudo sh get-docker.sh
Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737
<...>

Docker is installed. The docker service starts automatically on Debian based distributions. On RPM based distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it manually using the appropriate systemctl or service command. As the message indicates, non-root users cannot run Docker commands by default.

Use Docker as a non-privileged user, or install in rootless mode?

The installation script requires root or sudo privileges to install and use Docker. If you want to grant non-root users access to Docker, refer to the post-installation steps for Linux. Docker can also be installed without root privileges, or configured to run in rootless mode. For instructions on running Docker in rootless mode, refer to run the Docker daemon as a non-root user (rootless mode).

Install pre-releases

Docker also provides a convenience script at test.docker.com to install pre-releases of Docker on Linux. This script is equivalent to the script at get.docker.com, but configures your package manager to enable the “test” channel from our package repository, which includes both stable and pre-releases (beta versions, release-candidates) of Docker. Use this script to get early access to new releases, and to evaluate them in a testing environment before they are released as stable.

To install the latest version of Docker on Linux from the “test” channel, run:

$ curl -fsSL https://test.docker.com -o test-docker.sh
$ sudo sh test-docker.sh
<...>

Upgrade Docker after using the convenience script

If you installed Docker using the convenience script, you should upgrade Docker using your package manager directly. There is no advantage to re-running the convenience script, and it can cause issues if it attempts to re-add repositories which have already been added to the host machine.

Uninstall Docker Engine

  1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:

    $ sudo zypper remove docker-ce docker-ce-cli containerd.io docker-compose-plugin
  2. Images, containers, volumes, or customized configuration files on your host are not automatically removed. To delete all images, containers, and volumes:

    $ sudo rm -rf /var/lib/docker
    $ sudo rm -rf /var/lib/containerd

You must delete any edited configuration files manually.

Next steps


https://docs.docker.com/engine/install/sles/
diff --git a/devdocs/docker/engine%2Finstall%2Fubuntu%2Findex.html b/devdocs/docker/engine%2Finstall%2Fubuntu%2Findex.html new file mode 100644 index 00000000..c52bf582 --- /dev/null +++ b/devdocs/docker/engine%2Finstall%2Fubuntu%2Findex.html @@ -0,0 +1,47 @@ +

Install Docker Engine on Ubuntu


Docker Desktop for Linux

Docker Desktop helps you build, share, and run containers easily on Mac and Windows as you do on Linux. We are excited to share that Docker Desktop for Linux is now GA. For more information, see Docker Desktop for Linux.

To get started with Docker Engine on Ubuntu, make sure you meet the prerequisites, then install Docker.

Prerequisites

OS requirements

To install Docker Engine, you need the 64-bit version of one of these Ubuntu versions:

Docker Engine is supported on x86_64 (or amd64), armhf, arm64, and s390x architectures.

Uninstall old versions

Older versions of Docker were called docker, docker.io, or docker-engine. If these are installed, uninstall them:

$ sudo apt-get remove docker docker-engine docker.io containerd runc

It’s OK if apt-get reports that none of these packages are installed.

The contents of /var/lib/docker/, including images, containers, volumes, and networks, are preserved. If you do not need to save your existing data, and want to start with a clean installation, refer to the uninstall Docker Engine section at the bottom of this page.

Installation methods

You can install Docker Engine in different ways, depending on your needs: set up Docker’s repositories and install from them (the recommended approach), download the DEB package and install it manually, or use the convenience script in testing and development environments. Each method is covered in the sections below.

Install using the repository

Before you install Docker Engine for the first time on a new host machine, you need to set up the Docker repository. Afterward, you can install and update Docker from the repository.

Set up the repository

  1. Update the apt package index and install packages to allow apt to use a repository over HTTPS:

    $ sudo apt-get update

    $ sudo apt-get install \
        ca-certificates \
        curl \
        gnupg \
        lsb-release
  2. Add Docker’s official GPG key:

    $ sudo mkdir -p /etc/apt/keyrings
    $ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  3. Use the following command to set up the repository:

    $ echo \
      "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
      $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

Install Docker Engine

  1. Update the apt package index, and install the latest version of Docker Engine, containerd, and Docker Compose, or go to the next step to install a specific version:

    $ sudo apt-get update
    $ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin

    Receiving a GPG error when running apt-get update?

    Your default umask may not be set correctly, causing the public key file for the repo to not be detected. Run the following command and then try to update your repo again: sudo chmod a+r /etc/apt/keyrings/docker.gpg.

  2. To install a specific version of Docker Engine, list the available versions in the repo, then select and install:

    a. List the versions available in your repo:

    $ apt-cache madison docker-ce

    docker-ce | 5:20.10.16~3-0~ubuntu-jammy | https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages
    docker-ce | 5:20.10.15~3-0~ubuntu-jammy | https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages
    docker-ce | 5:20.10.14~3-0~ubuntu-jammy | https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages
    docker-ce | 5:20.10.13~3-0~ubuntu-jammy | https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages

    b. Install a specific version using the version string from the second column, for example, 5:20.10.16~3-0~ubuntu-jammy.

    $ sudo apt-get install docker-ce=<VERSION_STRING> docker-ce-cli=<VERSION_STRING> containerd.io docker-compose-plugin
  3. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Docker Engine is installed and running. The docker group is created but no users are added to it. You need to use sudo to run Docker commands. Continue to Linux postinstall to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, first run sudo apt-get update, then follow the installation instructions, choosing the new version you want to install.

Install from a package

If you cannot use Docker’s repository to install Docker Engine, you can download the .deb file for your release and install it manually. You need to download a new file each time you want to upgrade Docker.

  1. Go to https://download.docker.com/linux/ubuntu/dists/, choose your Ubuntu version, then browse to pool/stable/, choose amd64, armhf, arm64, or s390x, and download the .deb file for the Docker Engine version you want to install.

  2. Install Docker Engine, changing the path below to the path where you downloaded the Docker package.

    $ sudo dpkg -i /path/to/package.deb

    The Docker daemon starts automatically.

  3. Verify that Docker Engine is installed correctly by running the hello-world image.

    $ sudo docker run hello-world

    This command downloads a test image and runs it in a container. When the container runs, it prints a message and exits.

Docker Engine is installed and running. The docker group is created but no users are added to it. You need to use sudo to run Docker commands. Continue to Post-installation steps for Linux to allow non-privileged users to run Docker commands and for other optional configuration steps.

Upgrade Docker Engine

To upgrade Docker Engine, download the newer package file and repeat the installation procedure, pointing to the new file.

Install using the convenience script

Docker provides a convenience script at get.docker.com to install Docker into development environments quickly and non-interactively. The convenience script is not recommended for production environments, but can be used as an example to create a provisioning script that is tailored to your needs. Also refer to the install using the repository steps to learn about installation steps to install using the package repository. The source code for the script is open source, and can be found in the docker-install repository on GitHub.

Always examine scripts downloaded from the internet before running them locally. Before installing, make yourself familiar with potential risks and limitations of the convenience script:

Tip: preview script steps before running

You can run the script with the DRY_RUN=1 option to learn what steps the script will execute during installation:

$ curl -fsSL https://get.docker.com -o get-docker.sh
$ DRY_RUN=1 sh ./get-docker.sh

This example downloads the script from get.docker.com and runs it to install the latest stable release of Docker on Linux:

$ curl -fsSL https://get.docker.com -o get-docker.sh
$ sudo sh get-docker.sh
Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737
<...>

Docker is installed. The docker service starts automatically on Debian based distributions. On RPM based distributions, such as CentOS, Fedora, RHEL or SLES, you need to start it manually using the appropriate systemctl or service command. As the message indicates, non-root users cannot run Docker commands by default.

Use Docker as a non-privileged user, or install in rootless mode?

The installation script requires root or sudo privileges to install and use Docker. If you want to grant non-root users access to Docker, refer to the post-installation steps for Linux. Docker can also be installed without root privileges, or configured to run in rootless mode. For instructions on running Docker in rootless mode, refer to run the Docker daemon as a non-root user (rootless mode).

Install pre-releases

Docker also provides a convenience script at test.docker.com to install pre-releases of Docker on Linux. This script is equivalent to the script at get.docker.com, but configures your package manager to enable the “test” channel from our package repository, which includes both stable and pre-releases (beta versions, release-candidates) of Docker. Use this script to get early access to new releases, and to evaluate them in a testing environment before they are released as stable.

To install the latest version of Docker on Linux from the “test” channel, run:

$ curl -fsSL https://test.docker.com -o test-docker.sh
$ sudo sh test-docker.sh
<...>

Upgrade Docker after using the convenience script

If you installed Docker using the convenience script, you should upgrade Docker using your package manager directly. There is no advantage to re-running the convenience script, and it can cause issues if it attempts to re-add repositories which have already been added to the host machine.

Uninstall Docker Engine

  1. Uninstall the Docker Engine, CLI, Containerd, and Docker Compose packages:

    $ sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-compose-plugin
  2. Images, containers, volumes, or customized configuration files on your host are not automatically removed. To delete all images, containers, and volumes:

    $ sudo rm -rf /var/lib/docker
    $ sudo rm -rf /var/lib/containerd

You must delete any edited configuration files manually.

Next steps


https://docs.docker.com/engine/install/ubuntu/
diff --git a/devdocs/docker/engine%2Freference%2Fbuilder%2Findex.html b/devdocs/docker/engine%2Freference%2Fbuilder%2Findex.html new file mode 100644 index 00000000..533d2592 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fbuilder%2Findex.html @@ -0,0 +1,527 @@ +

Dockerfile reference


Docker can build images automatically by reading the instructions from a Dockerfile. A Dockerfile is a text document that contains all the commands a user could call on the command line to assemble an image. Using docker build users can create an automated build that executes several command-line instructions in succession.

This page describes the commands you can use in a Dockerfile. When you are done reading this page, refer to the Dockerfile Best Practices for a tip-oriented guide.

Usage

The docker build command builds an image from a Dockerfile and a context. The build’s context is the set of files at a specified location PATH or URL. The PATH is a directory on your local filesystem. The URL is a Git repository location.

The build context is processed recursively. So, a PATH includes any subdirectories and the URL includes the repository and its submodules. This example shows a build command that uses the current directory (.) as build context:

$ docker build .

Sending build context to Docker daemon  6.51 MB
...

The build is run by the Docker daemon, not by the CLI. The first thing a build process does is send the entire context (recursively) to the daemon. In most cases, it’s best to start with an empty directory as context and keep your Dockerfile in that directory. Add only the files needed for building the Dockerfile.

Warning

Do not use your root directory, /, as the PATH for your build context, as it causes the build to transfer the entire contents of your hard drive to the Docker daemon.

To use a file in the build context, the Dockerfile refers to the file specified in an instruction, for example, a COPY instruction. To increase the build’s performance, exclude files and directories by adding a .dockerignore file to the context directory. For information about how to create a .dockerignore file see the documentation on this page.

Traditionally, the Dockerfile is called Dockerfile and located in the root of the context. You use the -f flag with docker build to point to a Dockerfile anywhere in your file system.

$ docker build -f /path/to/a/Dockerfile .

You can specify a repository and tag at which to save the new image if the build succeeds:

$ docker build -t shykes/myapp .

To tag the image into multiple repositories after the build, add multiple -t parameters when you run the build command:

$ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest .

Before the Docker daemon runs the instructions in the Dockerfile, it performs a preliminary validation of the Dockerfile and returns an error if the syntax is incorrect:

$ docker build -t test/myapp .

[+] Building 0.3s (2/2) FINISHED
 => [internal] load build definition from Dockerfile                       0.1s
 => => transferring dockerfile: 60B                                        0.0s
 => [internal] load .dockerignore                                          0.1s
 => => transferring context: 2B                                            0.0s
error: failed to solve: rpc error: code = Unknown desc = failed to solve with frontend dockerfile.v0: failed to create LLB definition:
dockerfile parse error line 2: unknown instruction: RUNCMD

The Docker daemon runs the instructions in the Dockerfile one-by-one, committing the result of each instruction to a new image if necessary, before finally outputting the ID of your new image. The Docker daemon will automatically clean up the context you sent.

Note that each instruction is run independently, and causes a new image to be created - so RUN cd /tmp will not have any effect on the next instructions.

Whenever possible, Docker uses a build-cache to accelerate the docker build process significantly. This is indicated by the CACHED message in the console output. (For more information, see the Dockerfile best practices guide):

$ docker build -t svendowideit/ambassador .

[+] Building 0.7s (6/6) FINISHED
 => [internal] load build definition from Dockerfile                       0.1s
 => => transferring dockerfile: 286B                                       0.0s
 => [internal] load .dockerignore                                          0.1s
 => => transferring context: 2B                                            0.0s
 => [internal] load metadata for docker.io/library/alpine:3.2              0.4s
 => CACHED [1/2] FROM docker.io/library/alpine:3.2@sha256:e9a2035f9d0d7ce  0.0s
 => CACHED [2/2] RUN apk add --no-cache socat                              0.0s
 => exporting to image                                                     0.0s
 => => exporting layers                                                    0.0s
 => => writing image sha256:1affb80ca37018ac12067fa2af38cc5bcc2a8f09963de  0.0s
 => => naming to docker.io/svendowideit/ambassador                         0.0s

By default, the build cache is based on results from previous builds on the machine on which you are building. The --cache-from option also allows you to use a build cache that’s distributed through an image registry; refer to the specifying external cache sources section in the docker build command reference.

When you’re done with your build, you’re ready to look into scanning your image with docker scan, and pushing your image to Docker Hub.

BuildKit

Starting with version 18.09, Docker supports a new backend for executing your builds that is provided by the moby/buildkit project. The BuildKit backend provides many benefits compared to the old implementation. For example, BuildKit can:

To use the BuildKit backend, you need to set an environment variable DOCKER_BUILDKIT=1 on the CLI before invoking docker build.
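For example, to enable BuildKit for a single invocation:

$ DOCKER_BUILDKIT=1 docker build .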

To learn about the Dockerfile syntax available to BuildKit-based builds refer to the documentation in the BuildKit repository.

Format

Here is the format of the Dockerfile:

# Comment
INSTRUCTION arguments

The instruction is not case-sensitive. However, convention is for them to be UPPERCASE to distinguish them from arguments more easily.

Docker runs instructions in a Dockerfile in order. A Dockerfile must begin with a FROM instruction. This may be after parser directives, comments, and globally scoped ARGs. The FROM instruction specifies the Parent Image from which you are building. FROM may only be preceded by one or more ARG instructions, which declare arguments that are used in FROM lines in the Dockerfile.

Docker treats lines that begin with # as a comment, unless the line is a valid parser directive. A # marker anywhere else in a line is treated as an argument. This allows statements like:

# Comment
RUN echo 'we are running some # of cool things'

Comment lines are removed before the Dockerfile instructions are executed, which means that the comment in the following example is not handled by the shell executing the echo command, and both examples below are equivalent:

RUN echo hello \
# comment
world

RUN echo hello \
world

Line continuation characters are not supported in comments.

Note on whitespace

For backward compatibility, leading whitespace before comments (#) and instructions (such as RUN) are ignored, but discouraged. Leading whitespace is not preserved in these cases, and the following examples are therefore equivalent:

        # this is a comment-line
    RUN echo hello
RUN echo world

# this is a comment-line
RUN echo hello
RUN echo world

Note however, that whitespace in instruction arguments, such as the commands following RUN, are preserved, so the following example prints ` hello world` with leading whitespace as specified:

RUN echo "\
+     hello\
+     world"
+

Parser directives

Parser directives are optional, and affect the way in which subsequent lines in a Dockerfile are handled. Parser directives do not add layers to the build, and will not be shown as a build step. Parser directives are written as a special type of comment in the form # directive=value. A single directive may only be used once.

Once a comment, empty line or builder instruction has been processed, Docker no longer looks for parser directives. Instead it treats anything formatted as a parser directive as a comment and does not attempt to validate if it might be a parser directive. Therefore, all parser directives must be at the very top of a Dockerfile.

Parser directives are not case-sensitive. However, convention is for them to be lowercase. Convention is also to include a blank line following any parser directives. Line continuation characters are not supported in parser directives.

Due to these rules, the following examples are all invalid:

Invalid due to line continuation:

# direc \
tive=value

Invalid due to appearing twice:

# directive=value1
# directive=value2

FROM ImageName

Treated as a comment due to appearing after a builder instruction:

FROM ImageName
# directive=value

Treated as a comment due to appearing after a comment which is not a parser directive:

# About my dockerfile
# directive=value
FROM ImageName

The unknown directive is treated as a comment due to not being recognized. In addition, the known directive is treated as a comment due to appearing after a comment which is not a parser directive.

# unknowndirective=value
# knowndirective=value

Non line-breaking whitespace is permitted in a parser directive. Hence, the following lines are all treated identically:

#directive=value
# directive =value
#	directive= value
# directive = value
#	  dIrEcTiVe=value

The following parser directives are supported:

syntax

# syntax=[remote image reference]

For example:

# syntax=docker/dockerfile:1
# syntax=docker.io/docker/dockerfile:1
# syntax=example.com/user/repo:tag@sha256:abcdef...

This feature is only available when using the BuildKit backend, and is ignored when using the classic builder backend.

The syntax directive defines the location of the Dockerfile syntax that is used to build the Dockerfile. The BuildKit backend allows you to seamlessly use external implementations that are distributed as Docker images and execute inside a container sandbox environment.

Custom Dockerfile implementations allow you to:

Official releases

Docker distributes official versions of the images that can be used for building Dockerfiles under docker/dockerfile repository on Docker Hub. There are two channels where new images are released: stable and labs.

Stable channel follows semantic versioning. For example:

We recommend using docker/dockerfile:1, which always points to the latest stable release of the version 1 syntax, and receives both “minor” and “patch” updates for the version 1 release cycle. BuildKit automatically checks for updates of the syntax when performing a build, making sure you are using the most current version.

If a specific version is used, such as 1.2 or 1.2.1, the Dockerfile needs to be updated manually to continue receiving bugfixes and new features. Old versions of the Dockerfile remain compatible with the new versions of the builder.

labs channel

The “labs” channel provides early access to Dockerfile features that are not yet available in the stable channel. Labs channel images are released in conjunction with the stable releases, and follow the same versioning with the -labs suffix, for example:

Choose a channel that best fits your needs; if you want to benefit from new features, use the labs channel. Images in the labs channel provide a superset of the features in the stable channel; note that stable features in the labs channel images follow semantic versioning, but “labs” features do not, and newer releases may not be backwards compatible, so it is recommended to use an immutable full version variant.

For documentation on “labs” features, master builds, and nightly feature releases, refer to the description in the BuildKit source repository on GitHub. For a full list of available images, visit the image repository on Docker Hub, and the docker/dockerfile-upstream image repository for development builds.

escape

# escape=\ (backslash)

Or

# escape=` (backtick)

The escape directive sets the character used to escape characters in a Dockerfile. If not specified, the default escape character is \.

The escape character is used both to escape characters in a line, and to escape a newline. This allows a Dockerfile instruction to span multiple lines. Note that regardless of whether the escape parser directive is included in a Dockerfile, escaping is not performed in a RUN command, except at the end of a line.

Setting the escape character to ` is especially useful on Windows, where \ is the directory path separator. ` is consistent with Windows PowerShell.

Consider the following example, which would fail in a non-obvious way on Windows. The second \ at the end of the second line would be interpreted as an escape for the newline, instead of a target of the escape from the first \. Similarly, the \ at the end of the third line would, assuming it was actually handled as an instruction, cause it to be treated as a line continuation. The result of this Dockerfile is that the second and third lines are considered a single instruction:

FROM microsoft/nanoserver
COPY testfile.txt c:\\
RUN dir c:\

Results in:

PS E:\myproject> docker build -t cmd .

Sending build context to Docker daemon 3.072 kB
Step 1/2 : FROM microsoft/nanoserver
 ---> 22738ff49c6d
Step 2/2 : COPY testfile.txt c:\RUN dir c:
GetFileAttributesEx c:RUN: The system cannot find the file specified.
PS E:\myproject>

One solution to the above would be to use / as the target of both the COPY instruction, and dir. However, this syntax is, at best, confusing as it is not natural for paths on Windows, and at worst, error prone as not all commands on Windows support / as the path separator.

By adding the escape parser directive, the following Dockerfile succeeds as expected with the use of natural platform semantics for file paths on Windows:

# escape=`

FROM microsoft/nanoserver
COPY testfile.txt c:\
RUN dir c:\

Results in:

PS E:\myproject> docker build -t succeeds --no-cache=true .

Sending build context to Docker daemon 3.072 kB
Step 1/3 : FROM microsoft/nanoserver
 ---> 22738ff49c6d
Step 2/3 : COPY testfile.txt c:\
 ---> 96655de338de
Removing intermediate container 4db9acbb1682
Step 3/3 : RUN dir c:\
 ---> Running in a2c157f842f5
 Volume in drive C has no label.
 Volume Serial Number is 7E6D-E0F7

 Directory of c:\

10/05/2016  05:04 PM             1,894 License.txt
10/05/2016  02:22 PM    <DIR>          Program Files
10/05/2016  02:14 PM    <DIR>          Program Files (x86)
10/28/2016  11:18 AM                62 testfile.txt
10/28/2016  11:20 AM    <DIR>          Users
10/28/2016  11:20 AM    <DIR>          Windows
           2 File(s)          1,956 bytes
           4 Dir(s)  21,259,096,064 bytes free
 ---> 01c7f3bef04f
Removing intermediate container a2c157f842f5
Successfully built 01c7f3bef04f
PS E:\myproject>

Environment replacement

Environment variables (declared with the ENV statement) can also be used in certain instructions as variables to be interpreted by the Dockerfile. Escapes are also handled for including variable-like syntax into a statement literally.

Environment variables are notated in the Dockerfile either with $variable_name or ${variable_name}. They are treated equivalently and the brace syntax is typically used to address issues with variable names with no whitespace, like ${foo}_bar.

The ${variable_name} syntax also supports a few of the standard bash modifiers, as specified below:

${variable:-word} indicates that if variable is set then the result will be that value. If variable is not set then word will be the result.

${variable:+word} indicates that if variable is set then word will be the result, otherwise the result is the empty string.

In all cases, word can be any string, including additional environment variables.

Escaping is possible by adding a \ before the variable: \$foo or \${foo}, for example, will translate to $foo and ${foo} literals respectively.

Example (parsed representation is displayed after the #):

FROM busybox
ENV FOO=/bar
WORKDIR ${FOO}   # WORKDIR /bar
ADD . $FOO       # ADD . /bar
COPY \$FOO /quux # COPY $FOO /quux

Environment variables are supported by the following list of instructions in the Dockerfile:

Environment variable substitution will use the same value for each variable throughout the entire instruction. In other words, in this example:

ENV abc=hello
ENV abc=bye def=$abc
ENV ghi=$abc

will result in def having a value of hello, not bye. However, ghi will have a value of bye because it is not part of the same instruction that set abc to bye.

.dockerignore file

Before the docker CLI sends the context to the docker daemon, it looks for a file named .dockerignore in the root directory of the context. If this file exists, the CLI modifies the context to exclude files and directories that match patterns in it. This helps to avoid unnecessarily sending large or sensitive files and directories to the daemon and potentially adding them to images using ADD or COPY.

The CLI interprets the .dockerignore file as a newline-separated list of patterns similar to the file globs of Unix shells. For the purposes of matching, the root of the context is considered to be both the working and the root directory. For example, the patterns /foo/bar and foo/bar both exclude a file or directory named bar in the foo subdirectory of PATH or in the root of the git repository located at URL. Neither excludes anything else.

If a line in .dockerignore file starts with # in column 1, then this line is considered as a comment and is ignored before interpreted by the CLI.

Here is an example .dockerignore file:

# comment
*/temp*
*/*/temp*
temp?

This file causes the following build behavior:

Rule       | Behavior
# comment  | Ignored.
*/temp*    | Exclude files and directories whose names start with temp in any immediate subdirectory of the root. For example, the plain file /somedir/temporary.txt is excluded, as is the directory /somedir/temp.
*/*/temp*  | Exclude files and directories starting with temp from any subdirectory that is two levels below the root. For example, /somedir/subdir/temporary.txt is excluded.
temp?      | Exclude files and directories in the root directory whose names are a one-character extension of temp. For example, /tempa and /tempb are excluded.

Matching is done using Go’s filepath.Match rules. A preprocessing step removes leading and trailing whitespace and eliminates . and .. elements using Go’s filepath.Clean. Lines that are blank after preprocessing are ignored.

Beyond Go’s filepath.Match rules, Docker also supports a special wildcard string ** that matches any number of directories (including zero). For example, **/*.go will exclude all files that end with .go that are found in all directories, including the root of the build context.

Lines starting with ! (exclamation mark) can be used to make exceptions to exclusions. The following is an example .dockerignore file that uses this mechanism:

*.md
!README.md

All markdown files except README.md are excluded from the context.

The placement of ! exception rules influences the behavior: the last line of the .dockerignore that matches a particular file determines whether it is included or excluded. Consider the following example:

*.md
!README*.md
README-secret.md

No markdown files are included in the context except README files other than README-secret.md.

Now consider this example:

*.md
README-secret.md
!README*.md

All of the README files are included. The middle line has no effect because !README*.md matches README-secret.md and comes last.

You can even use the .dockerignore file to exclude the Dockerfile and .dockerignore files. These files are still sent to the daemon because it needs them to do its job. But the ADD and COPY instructions do not copy them to the image.

Finally, you may want to specify which files to include in the context, rather than which to exclude. To achieve this, specify * as the first pattern, followed by one or more ! exception patterns.
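A sketch of that inclusion-based approach, using hypothetical file names, looks like this:

*
!README.md
!requirements.txt

Everything in the context is excluded except README.md and requirements.txt.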

Note

For historical reasons, the pattern . is ignored.

FROM

FROM [--platform=<platform>] <image> [AS <name>]

Or

FROM [--platform=<platform>] <image>[:<tag>] [AS <name>]

Or

FROM [--platform=<platform>] <image>[@<digest>] [AS <name>]

The FROM instruction initializes a new build stage and sets the Base Image for subsequent instructions. As such, a valid Dockerfile must start with a FROM instruction. The image can be any valid image – it is especially easy to start by pulling an image from the Public Repositories.

The optional --platform flag can be used to specify the platform of the image in case FROM references a multi-platform image. For example, linux/amd64, linux/arm64, or windows/amd64. By default, the target platform of the build request is used. Global build arguments can be used in the value of this flag, for example automatic platform ARGs allow you to force a stage to native build platform (--platform=$BUILDPLATFORM), and use it to cross-compile to the target platform inside the stage.
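As an illustrative sketch only (the Go base image, stage name, and build command are assumptions, not part of this reference), a build stage might be pinned to the native build platform and cross-compile using the automatic TARGETOS and TARGETARCH arguments:

FROM --platform=$BUILDPLATFORM golang:1.19 AS build
ARG TARGETOS
ARG TARGETARCH
COPY . /src
WORKDIR /src
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /out/app .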

Understand how ARG and FROM interact

FROM instructions support variables that are declared by any ARG instructions that occur before the first FROM.

ARG  CODE_VERSION=latest
FROM base:${CODE_VERSION}
CMD  /code/run-app

FROM extras:${CODE_VERSION}
CMD  /code/run-extras

An ARG declared before a FROM is outside of a build stage, so it can’t be used in any instruction after a FROM. To use the default value of an ARG declared before the first FROM use an ARG instruction without a value inside of a build stage:

ARG VERSION=latest
FROM busybox:$VERSION
ARG VERSION
RUN echo $VERSION > image_version

RUN

RUN has 2 forms:

RUN <command> (shell form; the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)

RUN ["executable", "param1", "param2"] (exec form)

The RUN instruction will execute any commands in a new layer on top of the current image and commit the results. The resulting committed image will be used for the next step in the Dockerfile.

Layering RUN instructions and generating commits conforms to the core concepts of Docker where commits are cheap and containers can be created from any point in an image’s history, much like source control.

The exec form makes it possible to avoid shell string munging, and to RUN commands using a base image that does not contain the specified shell executable.

The default shell for the shell form can be changed using the SHELL command.

In the shell form you can use a \ (backslash) to continue a single RUN instruction onto the next line. For example, consider these two lines:

RUN /bin/bash -c 'source $HOME/.bashrc; \
echo $HOME'

Together they are equivalent to this single line:

RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME'

To use a different shell, other than ‘/bin/sh’, use the exec form passing in the desired shell. For example:

RUN ["/bin/bash", "-c", "echo hello"]
+

Note

The exec form is parsed as a JSON array, which means that you must use double-quotes (“) around words not single-quotes (‘).

Unlike the shell form, the exec form does not invoke a command shell. This means that normal shell processing does not happen. For example, RUN [ "echo", "$HOME" ] will not do variable substitution on $HOME. If you want shell processing then either use the shell form or execute a shell directly, for example: RUN [ "sh", "-c", "echo $HOME" ]. When using the exec form and executing a shell directly, as in the case for the shell form, it is the shell that is doing the environment variable expansion, not docker.

Note

In the JSON form, it is necessary to escape backslashes. This is particularly relevant on Windows where the backslash is the path separator. The following line would otherwise be treated as shell form due to not being valid JSON, and fail in an unexpected way:

RUN ["c:\windows\system32\tasklist.exe"]
+

The correct syntax for this example is:

RUN ["c:\\windows\\system32\\tasklist.exe"]
+

The cache for RUN instructions isn’t invalidated automatically during the next build. The cache for an instruction like RUN apt-get dist-upgrade -y will be reused during the next build. The cache for RUN instructions can be invalidated by using the --no-cache flag, for example docker build --no-cache.

See the Dockerfile Best Practices guide for more information.

The cache for RUN instructions can be invalidated by ADD and COPY instructions.

Known issues (RUN)

CMD

The CMD instruction has three forms:

CMD ["executable","param1","param2"] (exec form, this is the preferred form)

CMD ["param1","param2"] (as default parameters to ENTRYPOINT)

CMD command param1 param2 (shell form)

There can only be one CMD instruction in a Dockerfile. If you list more than one CMD then only the last CMD will take effect.

The main purpose of a CMD is to provide defaults for an executing container. These defaults can include an executable, or they can omit the executable, in which case you must specify an ENTRYPOINT instruction as well.

If CMD is used to provide default arguments for the ENTRYPOINT instruction, both the CMD and ENTRYPOINT instructions should be specified with the JSON array format.
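For example, the following pattern (the top utility is just an illustrative payload) sets a fixed entrypoint with a default, overridable flag:

FROM ubuntu
ENTRYPOINT ["top", "-b"]
CMD ["-c"]

By default the container runs top -b -c; arguments given to docker run replace only the CMD portion and leave the ENTRYPOINT intact.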

Note

The exec form is parsed as a JSON array, which means that you must use double-quotes (“) around words not single-quotes (‘).

Unlike the shell form, the exec form does not invoke a command shell. This means that normal shell processing does not happen. For example, CMD [ "echo", "$HOME" ] will not do variable substitution on $HOME. If you want shell processing then either use the shell form or execute a shell directly, for example: CMD [ "sh", "-c", "echo $HOME" ]. When using the exec form and executing a shell directly, as in the case for the shell form, it is the shell that is doing the environment variable expansion, not docker.

When used in the shell or exec formats, the CMD instruction sets the command to be executed when running the image.

If you use the shell form of the CMD, then the <command> will execute in /bin/sh -c:

FROM ubuntu
+CMD echo "This is a test." | wc -
+

If you want to run your <command> without a shell then you must express the command as a JSON array and give the full path to the executable. This array form is the preferred format of CMD. Any additional parameters must be individually expressed as strings in the array:

FROM ubuntu
+CMD ["/usr/bin/wc","--help"]
+

If you would like your container to run the same executable every time, then you should consider using ENTRYPOINT in combination with CMD. See ENTRYPOINT.

If the user specifies arguments to docker run then they will override the default specified in CMD.
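
For example, assuming the Dockerfile above is built into an image tagged wc-image (a name used here only for illustration), the arguments passed to docker run replace the default CMD:

$ docker build -t wc-image .
$ docker run --rm wc-image                     # runs the default /usr/bin/wc --help
$ docker run --rm wc-image cat /etc/hostname   # overrides CMD for this run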

Note

Do not confuse RUN with CMD. RUN actually runs a command and commits the result; CMD does not execute anything at build time, but specifies the intended command for the image.

LABEL

LABEL <key>=<value> <key>=<value> <key>=<value> ...
+

The LABEL instruction adds metadata to an image. A LABEL is a key-value pair. To include spaces within a LABEL value, use quotes and backslashes as you would in command-line parsing. A few usage examples:

LABEL "com.example.vendor"="ACME Incorporated"
+LABEL com.example.label-with-value="foo"
+LABEL version="1.0"
+LABEL description="This text illustrates \
+that label-values can span multiple lines."
+

An image can have more than one label. You can specify multiple labels on a single line. Prior to Docker 1.10, this decreased the size of the final image, but this is no longer the case. You may still choose to specify multiple labels in a single instruction, in one of the following two ways:

LABEL multi.label1="value1" multi.label2="value2" other="value3"
+
LABEL multi.label1="value1" \
+      multi.label2="value2" \
+      other="value3"
+

Labels included in base or parent images (images in the FROM line) are inherited by your image. If a label already exists but with a different value, the most-recently-applied value overrides any previously-set value.

To view an image’s labels, use the docker image inspect command. You can use the --format option to show just the labels:

$ docker image inspect --format='{{ json .Config.Labels }}' myimage
+
{
+  "com.example.vendor": "ACME Incorporated",
+  "com.example.label-with-value": "foo",
+  "version": "1.0",
+  "description": "This text illustrates that label-values can span multiple lines.",
+  "multi.label1": "value1",
+  "multi.label2": "value2",
+  "other": "value3"
+}
+

MAINTAINER (deprecated)

MAINTAINER <name>
+

The MAINTAINER instruction sets the Author field of the generated images. The LABEL instruction is a much more flexible version of this and you should use it instead, as it enables setting any metadata you require, and can be viewed easily, for example with docker inspect. To set a label corresponding to the MAINTAINER field you could use:

LABEL org.opencontainers.image.authors="SvenDowideit@home.org.au"
+

This will then be visible from docker inspect with the other labels.

EXPOSE

EXPOSE <port> [<port>/<protocol>...]
+

The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime. You can specify whether the port listens on TCP or UDP, and the default is TCP if the protocol is not specified.

The EXPOSE instruction does not actually publish the port. It functions as a type of documentation between the person who builds the image and the person who runs the container, about which ports are intended to be published. To actually publish the port when running the container, use the -p flag on docker run to publish and map one or more ports, or the -P flag to publish all exposed ports and map them to high-order ports.

By default, EXPOSE assumes TCP. You can also specify UDP:

EXPOSE 80/udp
+

To expose on both TCP and UDP, include two lines:

EXPOSE 80/tcp
+EXPOSE 80/udp
+

In this case, if you use -P with docker run, the port will be exposed once for TCP and once for UDP. Remember that -P uses an ephemeral high-ordered host port on the host, so the port will not be the same for TCP and UDP.

Regardless of the EXPOSE settings, you can override them at runtime by using the -p flag. For example

$ docker run -p 80:80/tcp -p 80:80/udp ...
+

To set up port redirection on the host system, see using the -P flag. The docker network command supports creating networks for communication among containers without the need to expose or publish specific ports, because the containers connected to the network can communicate with each other over any port. For detailed information, see the overview of this feature.
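
As a quick sketch of that approach (the network, container, and image names below are hypothetical), containers joined to the same user-defined network can reach each other by name on any port:

$ docker network create app-net
$ docker run -d --name db --network app-net redis
$ docker run -d --name web --network app-net my-web-image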

ENV

ENV <key>=<value> ...
+

The ENV instruction sets the environment variable <key> to the value <value>. This value will be in the environment for all subsequent instructions in the build stage and can be replaced inline in many of them as well. The value will be interpreted for other environment variables, so quote characters will be removed if they are not escaped. Like command line parsing, quotes and backslashes can be used to include spaces within values.

Example:

ENV MY_NAME="John Doe"
+ENV MY_DOG=Rex\ The\ Dog
+ENV MY_CAT=fluffy
+

The ENV instruction allows for multiple <key>=<value> ... variables to be set at one time, and the example below will yield the same net results in the final image:

ENV MY_NAME="John Doe" MY_DOG=Rex\ The\ Dog \
+    MY_CAT=fluffy
+

The environment variables set using ENV will persist when a container is run from the resulting image. You can view the values using docker inspect, and change them using docker run --env <key>=<value>.
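
For example (using <image> as a placeholder for an image built from the example above), the values appear in the container environment and can be overridden per run:

$ docker run --rm <image> env
$ docker run --rm --env MY_CAT=whiskers <image> env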

Environment variable persistence can cause unexpected side effects. For example, setting ENV DEBIAN_FRONTEND=noninteractive changes the behavior of apt-get, and may confuse users of your image.

If an environment variable is only needed during build, and not in the final image, consider setting a value for a single command instead:

RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y ...
+

Or using ARG, which is not persisted in the final image:

ARG DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install -y ...
+

Alternative syntax

The ENV instruction also allows an alternative syntax ENV <key> <value>, omitting the =. For example:

ENV MY_VAR my-value
+

This syntax does not allow for multiple environment-variables to be set in a single ENV instruction, and can be confusing. For example, the following sets a single environment variable (ONE) with value "TWO= THREE=world":

ENV ONE TWO= THREE=world
+

The alternative syntax is supported for backward compatibility, but discouraged for the reasons outlined above, and may be removed in a future release.

ADD

ADD has two forms:

ADD [--chown=<user>:<group>] <src>... <dest>
+ADD [--chown=<user>:<group>] ["<src>",... "<dest>"]
+

The latter form is required for paths containing whitespace.

Note

The --chown feature is only supported on Dockerfiles used to build Linux containers, and will not work on Windows containers. Since user and group ownership concepts do not translate between Linux and Windows, the use of /etc/passwd and /etc/group for translating user and group names to IDs restricts this feature to only be viable for Linux OS-based containers.

The ADD instruction copies new files, directories or remote file URLs from <src> and adds them to the filesystem of the image at the path <dest>.

Multiple <src> resources may be specified but if they are files or directories, their paths are interpreted as relative to the source of the context of the build.

Each <src> may contain wildcards and matching will be done using Go’s filepath.Match rules. For example:

To add all files starting with “hom”:

ADD hom* /mydir/
+

In the example below, ? is replaced with any single character, e.g., “home.txt”.

ADD hom?.txt /mydir/
+

The <dest> is an absolute path, or a path relative to WORKDIR, into which the source will be copied inside the destination container.

The example below uses a relative path, and adds “test.txt” to <WORKDIR>/relativeDir/:

ADD test.txt relativeDir/
+

Whereas this example uses an absolute path, and adds “test.txt” to /absoluteDir/

ADD test.txt /absoluteDir/
+

When adding files or directories that contain special characters (such as [ and ]), you need to escape those paths following the Golang rules to prevent them from being treated as a matching pattern. For example, to add a file named arr[0].txt, use the following:

ADD arr[[]0].txt /mydir/
+

All new files and directories are created with a UID and GID of 0, unless the optional --chown flag specifies a given username, groupname, or UID/GID combination to request specific ownership of the content added. The format of the --chown flag allows for either username and groupname strings or direct integer UID and GID in any combination. Providing a username without groupname or a UID without GID will use the same numeric UID as the GID. If a username or groupname is provided, the container’s root filesystem /etc/passwd and /etc/group files will be used to perform the translation from name to integer UID or GID respectively. The following examples show valid definitions for the --chown flag:

ADD --chown=55:mygroup files* /somedir/
+ADD --chown=bin files* /somedir/
+ADD --chown=1 files* /somedir/
+ADD --chown=10:11 files* /somedir/
+

If the container root filesystem does not contain either /etc/passwd or /etc/group files and either user or group names are used in the --chown flag, the build will fail on the ADD operation. Using numeric IDs requires no lookup and will not depend on container root filesystem content.

In the case where <src> is a remote file URL, the destination will have permissions of 600. If the remote file being retrieved has an HTTP Last-Modified header, the timestamp from that header will be used to set the mtime on the destination file. However, like any other file processed during an ADD, mtime will not be included in the determination of whether or not the file has changed and the cache should be updated.

Note

If you build by passing a Dockerfile through STDIN (docker build - < somefile), there is no build context, so the Dockerfile can only contain a URL based ADD instruction. You can also pass a compressed archive through STDIN (docker build - < archive.tar.gz); in that case, the Dockerfile at the root of the archive and the rest of the archive will be used as the context of the build.

If your URL files are protected using authentication, you need to use RUN wget, RUN curl or use another tool from within the container as the ADD instruction does not support authentication.

Note

The first encountered ADD instruction will invalidate the cache for all following instructions from the Dockerfile if the contents of <src> have changed. This includes invalidating the cache for RUN instructions. See the Dockerfile Best Practices guide – Leverage build cache for more information.

ADD obeys the following rules:

Note

The directory itself is not copied, just its contents.

COPY

COPY has two forms:

COPY [--chown=<user>:<group>] <src>... <dest>
+COPY [--chown=<user>:<group>] ["<src>",... "<dest>"]
+

This latter form is required for paths containing whitespace.

Note

The --chown feature is only supported on Dockerfiles used to build Linux containers, and will not work on Windows containers. Since user and group ownership concepts do not translate between Linux and Windows, the use of /etc/passwd and /etc/group for translating user and group names to IDs restricts this feature to only be viable for Linux OS-based containers.

The COPY instruction copies new files or directories from <src> and adds them to the filesystem of the container at the path <dest>.

Multiple <src> resources may be specified but the paths of files and directories will be interpreted as relative to the source of the context of the build.

Each <src> may contain wildcards and matching will be done using Go’s filepath.Match rules. For example:

To add all files starting with “hom”:

COPY hom* /mydir/
+

In the example below, ? is replaced with any single character, e.g., “home.txt”.

COPY hom?.txt /mydir/
+

The <dest> is an absolute path, or a path relative to WORKDIR, into which the source will be copied inside the destination container.

The example below uses a relative path, and adds “test.txt” to <WORKDIR>/relativeDir/:

COPY test.txt relativeDir/
+

Whereas this example uses an absolute path, and adds “test.txt” to /absoluteDir/

COPY test.txt /absoluteDir/
+

When copying files or directories that contain special characters (such as [ and ]), you need to escape those paths following the Golang rules to prevent them from being treated as a matching pattern. For example, to copy a file named arr[0].txt, use the following:

COPY arr[[]0].txt /mydir/
+

All new files and directories are created with a UID and GID of 0, unless the optional --chown flag specifies a given username, groupname, or UID/GID combination to request specific ownership of the copied content. The format of the --chown flag allows for either username and groupname strings or direct integer UID and GID in any combination. Providing a username without groupname or a UID without GID will use the same numeric UID as the GID. If a username or groupname is provided, the container’s root filesystem /etc/passwd and /etc/group files will be used to perform the translation from name to integer UID or GID respectively. The following examples show valid definitions for the --chown flag:

COPY --chown=55:mygroup files* /somedir/
+COPY --chown=bin files* /somedir/
+COPY --chown=1 files* /somedir/
+COPY --chown=10:11 files* /somedir/
+

If the container root filesystem does not contain either /etc/passwd or /etc/group files and either user or group names are used in the --chown flag, the build will fail on the COPY operation. Using numeric IDs requires no lookup and does not depend on container root filesystem content.

Note

If you build using STDIN (docker build - < somefile), there is no build context, so COPY can’t be used.

Optionally, COPY accepts a flag --from=<name> that sets the source location to a previous build stage (created with FROM .. AS <name>), which is then used instead of a build context sent by the user. If a build stage with the specified name can’t be found, an image with the same name is used instead.
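
For illustration, a minimal multi-stage sketch (the stage name, paths, and base images are chosen arbitrarily here) that copies a build artifact out of an earlier stage:

FROM golang:1.17 AS builder
WORKDIR /src
COPY . .
RUN go build -o /out/app .

FROM alpine
COPY --from=builder /out/app /usr/local/bin/app
ENTRYPOINT ["/usr/local/bin/app"]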

COPY obeys the following rules:

Note

The directory itself is not copied, just its contents.

Note

The first encountered COPY instruction will invalidate the cache for all following instructions from the Dockerfile if the contents of <src> have changed. This includes invalidating the cache for RUN instructions. See the Dockerfile Best Practices guide – Leverage build cache for more information.

ENTRYPOINT

ENTRYPOINT has two forms:

The exec form, which is the preferred form:

ENTRYPOINT ["executable", "param1", "param2"]
+

The shell form:

ENTRYPOINT command param1 param2
+

An ENTRYPOINT allows you to configure a container that will run as an executable.

For example, the following starts nginx with its default content, listening on port 80:

$ docker run -i -t --rm -p 80:80 nginx
+

Command line arguments to docker run <image> will be appended after all elements in an exec form ENTRYPOINT, and will override all elements specified using CMD. This allows arguments to be passed to the entry point, i.e., docker run <image> -d will pass the -d argument to the entry point. You can override the ENTRYPOINT instruction using the docker run --entrypoint flag.

The shell form prevents any CMD or run command line arguments from being used, but has the disadvantage that your ENTRYPOINT will be started as a subcommand of /bin/sh -c, which does not pass signals. This means that the executable will not be the container’s PID 1 - and will not receive Unix signals - so your executable will not receive a SIGTERM from docker stop <container>.

Only the last ENTRYPOINT instruction in the Dockerfile will have an effect.

Exec form ENTRYPOINT example

You can use the exec form of ENTRYPOINT to set fairly stable default commands and arguments and then use either form of CMD to set additional defaults that are more likely to be changed.

FROM ubuntu
+ENTRYPOINT ["top", "-b"]
+CMD ["-c"]
+

When you run the container, you can see that top is the only process:

$ docker run -it --rm --name test  top -H
+
+top - 08:25:00 up  7:27,  0 users,  load average: 0.00, 0.01, 0.05
+Threads:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+%Cpu(s):  0.1 us,  0.1 sy,  0.0 ni, 99.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
+KiB Mem:   2056668 total,  1616832 used,   439836 free,    99352 buffers
+KiB Swap:  1441840 total,        0 used,  1441840 free.  1324440 cached Mem
+
+  PID USER      PR  NI    VIRT    RES    SHR S %CPU %MEM     TIME+ COMMAND
+    1 root      20   0   19744   2336   2080 R  0.0  0.1   0:00.04 top
+

To examine the result further, you can use docker exec:

$ docker exec -it test ps aux
+
+USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  2.6  0.1  19752  2352 ?        Ss+  08:24   0:00 top -b -H
+root         7  0.0  0.1  15572  2164 ?        R+   08:25   0:00 ps aux
+

And you can gracefully request top to shut down using docker stop test.

The following Dockerfile shows using the ENTRYPOINT to run Apache in the foreground (i.e., as PID 1):

FROM debian:stable
+RUN apt-get update && apt-get install -y --force-yes apache2
+EXPOSE 80 443
+VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"]
+ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
+

If you need to write a starter script for a single executable, you can ensure that the final executable receives the Unix signals by using exec and gosu commands:

#!/usr/bin/env bash
+set -e
+
+if [ "$1" = 'postgres' ]; then
+    chown -R postgres "$PGDATA"
+
+    if [ -z "$(ls -A "$PGDATA")" ]; then
+        gosu postgres initdb
+    fi
+
+    exec gosu postgres "$@"
+fi
+
+exec "$@"
+

Lastly, if you need to do some extra cleanup (or communicate with other containers) on shutdown, or are co-ordinating more than one executable, you may need to ensure that the ENTRYPOINT script receives the Unix signals, passes them on, and then does some more work:

#!/bin/sh
+# Note: I've written this using sh so it works in the busybox container too
+
+# USE the trap if you need to also do manual cleanup after the service is stopped,
+#     or need to start multiple services in the one container
+trap "echo TRAPed signal" HUP INT QUIT TERM
+
+# start service in background here
+/usr/sbin/apachectl start
+
+echo "[hit enter key to exit] or run 'docker stop <container>'"
+read
+
+# stop service and clean up here
+echo "stopping apache"
+/usr/sbin/apachectl stop
+
+echo "exited $0"
+

If you run this image with docker run -it --rm -p 80:80 --name test apache, you can then examine the container’s processes with docker exec, or docker top, and then ask the script to stop Apache:

$ docker exec -it test ps aux
+
+USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  0.1  0.0   4448   692 ?        Ss+  00:42   0:00 /bin/sh /run.sh 123 cmd cmd2
+root        19  0.0  0.2  71304  4440 ?        Ss   00:42   0:00 /usr/sbin/apache2 -k start
+www-data    20  0.2  0.2 360468  6004 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
+www-data    21  0.2  0.2 360468  6000 ?        Sl   00:42   0:00 /usr/sbin/apache2 -k start
+root        81  0.0  0.1  15572  2140 ?        R+   00:44   0:00 ps aux
+
+$ docker top test
+
+PID                 USER                COMMAND
+10035               root                {run.sh} /bin/sh /run.sh 123 cmd cmd2
+10054               root                /usr/sbin/apache2 -k start
+10055               33                  /usr/sbin/apache2 -k start
+10056               33                  /usr/sbin/apache2 -k start
+
+$ /usr/bin/time docker stop test
+
+test
+real	0m 0.27s
+user	0m 0.03s
+sys	0m 0.03s
+

Note

You can override the ENTRYPOINT setting using --entrypoint, but this can only set the binary to exec (no sh -c will be used).

Note

The exec form is parsed as a JSON array, which means that you must use double-quotes (") around words, not single-quotes (').

Unlike the shell form, the exec form does not invoke a command shell. This means that normal shell processing does not happen. For example, ENTRYPOINT [ "echo", "$HOME" ] will not do variable substitution on $HOME. If you want shell processing then either use the shell form or execute a shell directly, for example: ENTRYPOINT [ "sh", "-c", "echo $HOME" ]. When using the exec form and executing a shell directly, as in the case for the shell form, it is the shell that is doing the environment variable expansion, not docker.

Shell form ENTRYPOINT example

You can specify a plain string for the ENTRYPOINT and it will execute in /bin/sh -c. This form will use shell processing to substitute shell environment variables, and will ignore any CMD or docker run command line arguments. To ensure that docker stop will signal any long running ENTRYPOINT executable correctly, you need to remember to start it with exec:

FROM ubuntu
+ENTRYPOINT exec top -b
+

When you run this image, you’ll see the single PID 1 process:

$ docker run -it --rm --name test top
+
+Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached
+CPU:   5% usr   0% sys   0% nic  94% idle   0% io   0% irq   0% sirq
+Load average: 0.08 0.03 0.05 2/98 6
+  PID  PPID USER     STAT   VSZ %VSZ %CPU COMMAND
+    1     0 root     R     3164   0%   0% top -b
+

Which exits cleanly on docker stop:

$ /usr/bin/time docker stop test
+
+test
+real	0m 0.20s
+user	0m 0.02s
+sys	0m 0.04s
+

If you forget to add exec to the beginning of your ENTRYPOINT:

FROM ubuntu
+ENTRYPOINT top -b
+CMD -- --ignored-param1
+

You can then run it (giving it a name for the next step):

$ docker run -it --name test top --ignored-param2
+
+top - 13:58:24 up 17 min,  0 users,  load average: 0.00, 0.00, 0.00
+Tasks:   2 total,   1 running,   1 sleeping,   0 stopped,   0 zombie
+%Cpu(s): 16.7 us, 33.3 sy,  0.0 ni, 50.0 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
+MiB Mem :   1990.8 total,   1354.6 free,    231.4 used,    404.7 buff/cache
+MiB Swap:   1024.0 total,   1024.0 free,      0.0 used.   1639.8 avail Mem
+
+  PID USER      PR  NI    VIRT    RES    SHR S  %CPU  %MEM     TIME+ COMMAND
+    1 root      20   0    2612    604    536 S   0.0   0.0   0:00.02 sh
+    6 root      20   0    5956   3188   2768 R   0.0   0.2   0:00.00 top
+

You can see from the output of top that the specified ENTRYPOINT is not PID 1.

If you then run docker stop test, the container will not exit cleanly - the stop command will be forced to send a SIGKILL after the timeout:

$ docker exec -it test ps waux
+
+USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+root         1  0.4  0.0   2612   604 pts/0    Ss+  13:58   0:00 /bin/sh -c top -b --ignored-param2
+root         6  0.0  0.1   5956  3188 pts/0    S+   13:58   0:00 top -b
+root         7  0.0  0.1   5884  2816 pts/1    Rs+  13:58   0:00 ps waux
+
+$ /usr/bin/time docker stop test
+
+test
+real	0m 10.19s
+user	0m 0.04s
+sys	0m 0.03s
+

Understand how CMD and ENTRYPOINT interact

Both CMD and ENTRYPOINT instructions define what command gets executed when running a container. There are a few rules that describe their co-operation.

  1. Dockerfile should specify at least one of CMD or ENTRYPOINT commands.

  2. ENTRYPOINT should be defined when using the container as an executable.

  3. CMD should be used as a way of defining default arguments for an ENTRYPOINT command or for executing an ad-hoc command in a container.

  4. CMD will be overridden when running the container with alternative arguments.

The table below shows what command is executed for different ENTRYPOINT / CMD combinations:

                           | No ENTRYPOINT              | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"]
No CMD                     | error, not allowed         | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry
CMD ["exec_cmd", "p1_cmd"] | exec_cmd p1_cmd            | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd
CMD ["p1_cmd", "p2_cmd"]   | p1_cmd p2_cmd              | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd
CMD exec_cmd p1_cmd        | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd

Note

If CMD is defined from the base image, setting ENTRYPOINT will reset CMD to an empty value. In this scenario, CMD must be defined in the current image to have a value.

VOLUME

VOLUME ["/data"]
+

The VOLUME instruction creates a mount point with the specified name and marks it as holding externally mounted volumes from native host or other containers. The value can be a JSON array, VOLUME ["/var/log/"], or a plain string with multiple arguments, such as VOLUME /var/log or VOLUME /var/log /var/db. For more information/examples and mounting instructions via the Docker client, refer to Share Directories via Volumes documentation.

The docker run command initializes the newly created volume with any data that exists at the specified location within the base image. For example, consider the following Dockerfile snippet:

FROM ubuntu
+RUN mkdir /myvol
+RUN echo "hello world" > /myvol/greeting
+VOLUME /myvol
+

This Dockerfile results in an image that causes docker run to create a new mount point at /myvol and copy the greeting file into the newly created volume.
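
One way to observe this (the image tag and container name below are hypothetical):

$ docker build -t greeting-image .
$ docker run --name greeting-test greeting-image cat /myvol/greeting   # prints the file copied into the volume
$ docker inspect --format='{{ json .Mounts }}' greeting-test           # shows the anonymous volume that was created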

Notes about specifying volumes

Keep the following things in mind about volumes in the Dockerfile.

USER

USER <user>[:<group>]
+

or

USER <UID>[:<GID>]
+

The USER instruction sets the user name (or UID) and optionally the user group (or GID) to use when running the image and for any RUN, CMD and ENTRYPOINT instructions that follow it in the Dockerfile.

Note that when specifying a group for the user, the user will have only the specified group membership. Any other configured group memberships will be ignored.

Warning

When the user doesn’t have a primary group, the image (or the next instructions) will be run with the root group.

On Windows, the user must be created first if it’s not a built-in account. This can be done with the net user command called as part of a Dockerfile.

FROM microsoft/windowsservercore
+# Create Windows user in the container
+RUN net user /add patrick
+# Set it for subsequent commands
+USER patrick
+

WORKDIR

WORKDIR /path/to/workdir
+

The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile. If the WORKDIR doesn’t exist, it will be created even if it’s not used in any subsequent Dockerfile instruction.

The WORKDIR instruction can be used multiple times in a Dockerfile. If a relative path is provided, it will be relative to the path of the previous WORKDIR instruction. For example:

WORKDIR /a
+WORKDIR b
+WORKDIR c
+RUN pwd
+

The output of the final pwd command in this Dockerfile would be /a/b/c.

The WORKDIR instruction can resolve environment variables previously set using ENV. You can only use environment variables explicitly set in the Dockerfile. For example:

ENV DIRPATH=/path
+WORKDIR $DIRPATH/$DIRNAME
+RUN pwd
+

The output of the final pwd command in this Dockerfile would be /path/$DIRNAME.

If not specified, the default working directory is /. In practice, if you aren’t building a Dockerfile from scratch (FROM scratch), the WORKDIR may likely be set by the base image you’re using.

Therefore, to avoid unintended operations in unknown directories, it is best practice to set your WORKDIR explicitly.

ARG

ARG <name>[=<default value>]
+

The ARG instruction defines a variable that users can pass at build-time to the builder with the docker build command using the --build-arg <varname>=<value> flag. If a user specifies a build argument that was not defined in the Dockerfile, the build outputs a warning.

[Warning] One or more build-args [foo] were not consumed.
+

A Dockerfile may include one or more ARG instructions. For example, the following is a valid Dockerfile:

FROM busybox
+ARG user1
+ARG buildno
+# ...
+

Warning:

It is not recommended to use build-time variables for passing secrets like GitHub keys, user credentials, etc. Build-time variable values are visible to any user of the image with the docker history command.

Refer to the “build images with BuildKit” section to learn about secure ways to use secrets when building images.

Default values

An ARG instruction can optionally include a default value:

FROM busybox
+ARG user1=someuser
+ARG buildno=1
+# ...
+

If an ARG instruction has a default value and if there is no value passed at build-time, the builder uses the default.

Scope

An ARG variable definition comes into effect from the line on which it is defined in the Dockerfile not from the argument’s use on the command-line or elsewhere. For example, consider this Dockerfile:

FROM busybox
+USER ${user:-some_user}
+ARG user
+USER $user
+# ...
+

A user builds this file by calling:

$ docker build --build-arg user=what_user .
+

The USER at line 2 evaluates to some_user as the user variable is defined on the subsequent line 3. The USER at line 4 evaluates to what_user as user is defined and the what_user value was passed on the command line. Prior to its definition by an ARG instruction, any use of a variable results in an empty string.

An ARG instruction goes out of scope at the end of the build stage where it was defined. To use an arg in multiple stages, each stage must include the ARG instruction.

FROM busybox
+ARG SETTINGS
+RUN ./run/setup $SETTINGS
+
+FROM busybox
+ARG SETTINGS
+RUN ./run/other $SETTINGS
+

Using ARG variables

You can use an ARG or an ENV instruction to specify variables that are available to the RUN instruction. Environment variables defined using the ENV instruction always override an ARG instruction of the same name. Consider this Dockerfile with an ENV and ARG instruction.

FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=v1.0.0
+RUN echo $CONT_IMG_VER
+

Then, assume this image is built with this command:

$ docker build --build-arg CONT_IMG_VER=v2.0.1 .
+

In this case, the RUN instruction uses v1.0.0 instead of the ARG setting passed by the user: v2.0.1. This behavior is similar to a shell script where a locally scoped variable overrides the variables passed as arguments or inherited from the environment, from its point of definition.

Using the example above but a different ENV specification you can create more useful interactions between ARG and ENV instructions:

FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=${CONT_IMG_VER:-v1.0.0}
+RUN echo $CONT_IMG_VER
+

Unlike an ARG instruction, ENV values are always persisted in the built image. Consider a docker build without the --build-arg flag:

$ docker build .
+

Using this Dockerfile example, CONT_IMG_VER is still persisted in the image but its value would be v1.0.0 as it is the default set in line 3 by the ENV instruction.

The variable expansion technique in this example allows you to pass arguments from the command line and persist them in the final image by leveraging the ENV instruction. Variable expansion is only supported for a limited set of Dockerfile instructions.

Predefined ARGs

Docker has a set of predefined ARG variables that you can use without a corresponding ARG instruction in the Dockerfile.
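
These include HTTP_PROXY, http_proxy, HTTPS_PROXY, https_proxy, FTP_PROXY, ftp_proxy, NO_PROXY, and no_proxy.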

To use these, pass them on the command line using the --build-arg flag, for example:

$ docker build --build-arg HTTPS_PROXY=https://my-proxy.example.com .
+

By default, these pre-defined variables are excluded from the output of docker history. Excluding them reduces the risk of accidentally leaking sensitive authentication information in an HTTP_PROXY variable.

For example, consider building the following Dockerfile using --build-arg HTTP_PROXY=http://user:pass@proxy.lon.example.com

FROM ubuntu
+RUN echo "Hello World"
+

In this case, the value of the HTTP_PROXY variable is not available in the docker history and is not cached. If you were to change location, and your proxy server changed to http://user:pass@proxy.sfo.example.com, a subsequent build does not result in a cache miss.

If you need to override this behaviour then you may do so by adding an ARG statement in the Dockerfile as follows:

FROM ubuntu
+ARG HTTP_PROXY
+RUN echo "Hello World"
+

When building this Dockerfile, the HTTP_PROXY is preserved in the docker history, and changing its value invalidates the build cache.

Automatic platform ARGs in the global scope

This feature is only available when using the BuildKit backend.

Docker predefines a set of ARG variables with information on the platform of the node performing the build (build platform) and on the platform of the resulting image (target platform). The target platform can be specified with the --platform flag on docker build.

The following ARG variables are set automatically:
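
TARGETPLATFORM - platform of the build result (for example linux/amd64, linux/arm/v7, windows/amd64)
TARGETOS - OS component of TARGETPLATFORM
TARGETARCH - architecture component of TARGETPLATFORM
TARGETVARIANT - variant component of TARGETPLATFORM
BUILDPLATFORM - platform of the node performing the build
BUILDOS - OS component of BUILDPLATFORM
BUILDARCH - architecture component of BUILDPLATFORM
BUILDVARIANT - variant component of BUILDPLATFORM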

These arguments are defined in the global scope, so they are not automatically available inside build stages or for your RUN commands. To expose one of these arguments inside the build stage, redefine it without a value.

For example:

FROM alpine
+ARG TARGETPLATFORM
+RUN echo "I'm building for $TARGETPLATFORM"
+
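
With BuildKit enabled, the target platform can be selected at build time, for instance (the image tag here is only illustrative):

$ DOCKER_BUILDKIT=1 docker build --platform=linux/arm64 -t platform-demo .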

Impact on build caching

ARG variables are not persisted into the built image as ENV variables are. However, ARG variables do impact the build cache in similar ways. If a Dockerfile defines an ARG variable whose value is different from a previous build, then a “cache miss” occurs upon its first usage, not its definition. In particular, all RUN instructions following an ARG instruction use the ARG variable implicitly (as an environment variable), thus can cause a cache miss. All predefined ARG variables are exempt from caching unless there is a matching ARG statement in the Dockerfile.

For example, consider these two Dockerfiles:

FROM ubuntu
+ARG CONT_IMG_VER
+RUN echo $CONT_IMG_VER
+
FROM ubuntu
+ARG CONT_IMG_VER
+RUN echo hello
+

If you specify --build-arg CONT_IMG_VER=<value> on the command line, in both cases, the specification on line 2 does not cause a cache miss; line 3 does cause a cache miss. ARG CONT_IMG_VER causes the RUN line to be identified as the same as running CONT_IMG_VER=<value> echo hello, so if the <value> changes, we get a cache miss.

Consider another example under the same command line:

FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=$CONT_IMG_VER
+RUN echo $CONT_IMG_VER
+

In this example, the cache miss occurs on line 3. The miss happens because the variable’s value in the ENV references the ARG variable and that variable is changed through the command line. In this example, the ENV command causes the image to include the value.

If an ENV instruction overrides an ARG instruction of the same name, like this Dockerfile:

FROM ubuntu
+ARG CONT_IMG_VER
+ENV CONT_IMG_VER=hello
+RUN echo $CONT_IMG_VER
+

Line 3 does not cause a cache miss because the value of CONT_IMG_VER is a constant (hello). As a result, the environment variables and values used on the RUN (line 4) don’t change between builds.

ONBUILD

ONBUILD <INSTRUCTION>
+

The ONBUILD instruction adds to the image a trigger instruction to be executed at a later time, when the image is used as the base for another build. The trigger will be executed in the context of the downstream build, as if it had been inserted immediately after the FROM instruction in the downstream Dockerfile.

Any build instruction can be registered as a trigger.

This is useful if you are building an image which will be used as a base to build other images, for example an application build environment or a daemon which may be customized with user-specific configuration.

For example, if your image is a reusable Python application builder, it will require application source code to be added in a particular directory, and it might require a build script to be called after that. You can’t just call ADD and RUN now, because you don’t yet have access to the application source code, and it will be different for each application build. You could simply provide application developers with a boilerplate Dockerfile to copy-paste into their application, but that is inefficient, error-prone and difficult to update because it mixes with application-specific code.

The solution is to use ONBUILD to register instructions in advance, to run later, during the next build stage.

Here’s how it works:

  1. When it encounters an ONBUILD instruction, the builder adds a trigger to the metadata of the image being built. The instruction does not otherwise affect the current build.
  2. At the end of the build, a list of all triggers is stored in the image manifest, under the key OnBuild. They can be inspected with the docker inspect command.
  3. Later the image may be used as a base for a new build, using the FROM instruction. As part of processing the FROM instruction, the downstream builder looks for ONBUILD triggers, and executes them in the same order they were registered. If any of the triggers fail, the FROM instruction is aborted which in turn causes the build to fail. If all triggers succeed, the FROM instruction completes and the build continues as usual.
  4. Triggers are cleared from the final image after being executed. In other words they are not inherited by “grand-children” builds.

For example you might add something like this:

ONBUILD ADD . /app/src
+ONBUILD RUN /usr/local/bin/python-build --dir /app/src
+
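
For illustration, a downstream Dockerfile that builds on such an image might contain little more than a FROM line (the image name below is hypothetical); the registered triggers run immediately after it:

FROM my-python-builder
# The ONBUILD triggers registered above run at this point, as if the
# downstream Dockerfile contained:
#   ADD . /app/src
#   RUN /usr/local/bin/python-build --dir /app/src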

Warning

Chaining ONBUILD instructions using ONBUILD ONBUILD isn’t allowed.

Warning

The ONBUILD instruction may not trigger FROM or MAINTAINER instructions.

STOPSIGNAL

STOPSIGNAL signal
+

The STOPSIGNAL instruction sets the system call signal that will be sent to the container to exit. This signal can be a signal name in the format SIG<NAME>, for instance SIGKILL, or an unsigned number that matches a position in the kernel’s syscall table, for instance 9. The default is SIGTERM if not defined.

The image’s default stopsignal can be overridden per container, using the --stop-signal flag on docker run and docker create.
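
For example, a quick sketch of overriding the stop signal for a single container at run time:

$ docker run -d --stop-signal=SIGQUIT <image>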

HEALTHCHECK

The HEALTHCHECK instruction has two forms:
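
HEALTHCHECK [OPTIONS] CMD command (check container health by running a command inside the container)
HEALTHCHECK NONE (disable any healthcheck inherited from the base image)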

The HEALTHCHECK instruction tells Docker how to test a container to check that it is still working. This can detect cases such as a web server that is stuck in an infinite loop and unable to handle new connections, even though the server process is still running.

When a container has a healthcheck specified, it has a health status in addition to its normal status. This status is initially starting. Whenever a health check passes, it becomes healthy (whatever state it was previously in). After a certain number of consecutive failures, it becomes unhealthy.

The options that can appear before CMD are:
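
--interval=DURATION (default: 30s)
--timeout=DURATION (default: 30s)
--start-period=DURATION (default: 0s)
--retries=N (default: 3)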

The health check will first run interval seconds after the container is started, and then again interval seconds after each previous check completes.

If a single run of the check takes longer than timeout seconds then the check is considered to have failed.

It takes retries consecutive failures of the health check for the container to be considered unhealthy.

start period provides initialization time for containers that need time to bootstrap. Probe failure during that period will not be counted towards the maximum number of retries. However, if a health check succeeds during the start period, the container is considered started and all consecutive failures will be counted towards the maximum number of retries.

There can only be one HEALTHCHECK instruction in a Dockerfile. If you list more than one then only the last HEALTHCHECK will take effect.

The command after the CMD keyword can be either a shell command (e.g. HEALTHCHECK CMD /bin/check-running) or an exec array (as with other Dockerfile commands; see e.g. ENTRYPOINT for details).

The command’s exit status indicates the health status of the container. The possible values are:
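
0: success - the container is healthy and ready for use
1: unhealthy - the container is not working correctly
2: reserved - do not use this exit code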

For example, to check every five minutes or so that a web-server is able to serve the site’s main page within three seconds:

HEALTHCHECK --interval=5m --timeout=3s \
+  CMD curl -f http://localhost/ || exit 1
+

To help debug failing probes, any output text (UTF-8 encoded) that the command writes on stdout or stderr will be stored in the health status and can be queried with docker inspect. Such output should be kept short (only the first 4096 bytes are stored currently).

When the health status of a container changes, a health_status event is generated with the new status.
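
One way to read that stored status and the most recent probe output is to query the container's State.Health field with docker inspect, for example:

$ docker inspect --format='{{ json .State.Health }}' <container>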

SHELL

SHELL ["executable", "parameters"]
+

The SHELL instruction allows the default shell used for the shell form of commands to be overridden. The default shell on Linux is ["/bin/sh", "-c"], and on Windows is ["cmd", "/S", "/C"]. The SHELL instruction must be written in JSON form in a Dockerfile.

The SHELL instruction is particularly useful on Windows where there are two commonly used and quite different native shells: cmd and powershell, as well as alternate shells available including sh.

The SHELL instruction can appear multiple times. Each SHELL instruction overrides all previous SHELL instructions, and affects all subsequent instructions. For example:

FROM microsoft/windowsservercore
+
+# Executed as cmd /S /C echo default
+RUN echo default
+
+# Executed as cmd /S /C powershell -command Write-Host default
+RUN powershell -command Write-Host default
+
+# Executed as powershell -command Write-Host hello
+SHELL ["powershell", "-command"]
+RUN Write-Host hello
+
+# Executed as cmd /S /C echo hello
+SHELL ["cmd", "/S", "/C"]
+RUN echo hello
+

The following instructions can be affected by the SHELL instruction when the shell form of them is used in a Dockerfile: RUN, CMD and ENTRYPOINT.

The following example is a common pattern found on Windows which can be streamlined by using the SHELL instruction:

RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt"
+

The command invoked by docker will be:

cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt"
+

This is inefficient for two reasons. First, there is an unnecessary cmd.exe command processor (aka shell) being invoked. Second, each RUN instruction in the shell form requires an extra powershell -command prefixing the command.

To make this more efficient, one of two mechanisms can be employed. One is to use the JSON form of the RUN command such as:

RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""]
+

While the JSON form is unambiguous and does not use the unnecessary cmd.exe, it does require more verbosity through double-quoting and escaping. The alternate mechanism is to use the SHELL instruction and the shell form, making a more natural syntax for Windows users, especially when combined with the escape parser directive:

# escape=`
+
+FROM microsoft/nanoserver
+SHELL ["powershell","-command"]
+RUN New-Item -ItemType Directory C:\Example
+ADD Execute-MyCmdlet.ps1 c:\example\
+RUN c:\example\Execute-MyCmdlet -sample 'hello world'
+

Resulting in:

PS E:\myproject> docker build -t shell .
+
+Sending build context to Docker daemon 4.096 kB
+Step 1/5 : FROM microsoft/nanoserver
+ ---> 22738ff49c6d
+Step 2/5 : SHELL powershell -command
+ ---> Running in 6fcdb6855ae2
+ ---> 6331462d4300
+Removing intermediate container 6fcdb6855ae2
+Step 3/5 : RUN New-Item -ItemType Directory C:\Example
+ ---> Running in d0eef8386e97
+
+
+    Directory: C:\
+
+
+Mode         LastWriteTime              Length Name
+----         -------------              ------ ----
+d-----       10/28/2016  11:26 AM              Example
+
+
+ ---> 3f2fbf1395d9
+Removing intermediate container d0eef8386e97
+Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\
+ ---> a955b2621c31
+Removing intermediate container b825593d39fc
+Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world'
+ ---> Running in be6d8e63fe75
+hello world
+ ---> 8e559e9bf424
+Removing intermediate container be6d8e63fe75
+Successfully built 8e559e9bf424
+PS E:\myproject>
+

The SHELL instruction could also be used to modify the way in which a shell operates. For example, using SHELL cmd /S /C /V:ON|OFF on Windows, delayed environment variable expansion semantics could be modified.

The SHELL instruction can also be used on Linux should an alternate shell be required such as zsh, csh, tcsh and others.

Dockerfile examples

For examples of Dockerfiles, refer to:

+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/builder/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fattach%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fattach%2Findex.html new file mode 100644 index 00000000..12a221c7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fattach%2Findex.html @@ -0,0 +1,65 @@ +

docker attach


Attach local standard input, output, and error streams to a running container

Usage

$ docker attach [OPTIONS] CONTAINER
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Use docker attach to attach your terminal’s standard input, output, and error (or any combination of the three) to a running container using the container’s ID or name. This allows you to view its ongoing output or to control it interactively, as though the commands were running directly in your terminal.

Note: The attach command will display the output of the ENTRYPOINT/CMD process. This can appear as if the attach command is hung when in fact the process may simply not be interacting with the terminal at that time.

You can attach to the same contained process multiple times simultaneously, from different sessions on the Docker host.

To stop a container, use CTRL-c. This key sequence sends SIGKILL to the container. If --sig-proxy is true (the default), CTRL-c sends a SIGINT to the container. If the container was run with -i and -t, you can detach from a container and leave it running using the CTRL-p CTRL-q key sequence.

Note: A process running as PID 1 inside a container is treated specially by Linux: it ignores any signal with the default action. So, the process will not terminate on SIGINT or SIGTERM unless it is coded to do so.

It is forbidden to redirect the standard input of a docker attach command while attaching to a tty-enabled container (i.e.: launched with -t).

While a client is connected to container’s stdio using docker attach, Docker uses a ~1MB memory buffer to maximize the throughput of the application. If this buffer is filled, the speed of the API connection will start to have an effect on the process output writing speed. This is similar to other applications like SSH. Because of this, it is not recommended to run performance critical applications that generate a lot of output in the foreground over a slow client connection. Instead, users should use the docker logs command to get access to the logs.

Override the detach sequence

If you want, you can configure an override for the Docker key sequence for detach. This is useful if the Docker default sequence conflicts with a key sequence you use for other applications. There are two ways to define your own detach key sequence: as a per-container override, or as a configuration property that applies to all containers.

To override the sequence for an individual container, use the --detach-keys="<sequence>" flag with the docker attach command. The format of the <sequence> is either a letter [a-Z], or the ctrl- combined with any of the following:

These a, ctrl-a, X, or ctrl-\\ values are all examples of valid key sequences. To configure a different default key sequence for all containers, see the Configuration file section.
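
For example, to use one of those sequences for a single attach session (reusing the topdemo container from the example below):

$ docker attach --detach-keys="ctrl-a" topdemo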

For example uses of this command, refer to the examples section below.

Options

Name, shorthand | Default | Description
--detach-keys | | Override the key sequence for detaching a container
--no-stdin | | Do not attach STDIN
--sig-proxy | true | Proxy all received signals to the process

Examples

Attach to and detach from a running container

$ docker run -d --name topdemo ubuntu /usr/bin/top -b
+
+$ docker attach topdemo
+
+top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
+Swap:   786428k total,        0k used,   786428k free,   221740k cached
+
+PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+ 1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top
+
+ top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+ Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+ Cpu(s):  0.0%us,  0.2%sy,  0.0%ni, 99.8%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+ Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
+ Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+   PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+       1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
+
+
+ top - 02:05:58 up  3:06,  0 users,  load average: 0.01, 0.02, 0.05
+ Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+ Cpu(s):  0.2%us,  0.3%sy,  0.0%ni, 99.5%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+ Mem:    373572k total,   355780k used,    17792k free,    27880k buffers
+ Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+ PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+      1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
+^C$
+
+$ echo $?
+0
+$ docker ps -a | grep topdemo
+
+7998ac8581f9        ubuntu:14.04        "/usr/bin/top -b"   38 seconds ago      Exited (0) 21 seconds ago                          topdemo
+

Get the exit code of the container’s command

And in this second example, you can see the exit code returned by the bash process is returned by the docker attach command to its caller too:

$ docker run --name test -d -it debian
+275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab
+
+$ docker attach test
+root@f38c87f2a42d:/# exit 13
+
+exit
+
+$ echo $?
+13
+
+$ docker ps -a | grep test
+
+275c44472aeb        debian:7            "/bin/bash"         26 seconds ago      Exited (13) 17 seconds ago                         test
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/attach/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuild%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuild%2Findex.html new file mode 100644 index 00000000..ad2eb257 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuild%2Findex.html @@ -0,0 +1,180 @@ +

docker build


Build an image from a Dockerfile

Usage

$ docker build [OPTIONS] PATH | URL | -
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker build command builds Docker images from a Dockerfile and a “context”. A build’s context is the set of files located in the specified PATH or URL. The build process can refer to any of the files in the context. For example, your build can use a COPY instruction to reference a file in the context.

The URL parameter can refer to three kinds of resources: Git repositories, pre-packaged tarball contexts and plain text files.

Git repositories

When the URL parameter points to the location of a Git repository, the repository acts as the build context. The system recursively fetches the repository and its submodules. The commit history is not preserved. A repository is first pulled into a temporary directory on your local host. After that succeeds, the directory is sent to the Docker daemon as the context. Local copy gives you the ability to access private repositories using local user credentials, VPN’s, and so forth.

Note

If the URL parameter contains a fragment the system will recursively clone the repository and its submodules using a git clone --recursive command.

Git URLs accept context configuration in their fragment section, separated by a colon (:). The first part represents the reference that Git will check out, and can be either a branch, a tag, or a remote reference. The second part represents a subdirectory inside the repository that will be used as a build context.

For example, run this command to use a directory called docker in the branch container:

$ docker build https://github.com/docker/rootfs.git#container:docker
+

The following table represents all the valid suffixes with their build contexts:

Build Syntax Suffix | Commit Used | Build Context Used
myrepo.git | refs/heads/master | /
myrepo.git#mytag | refs/tags/mytag | /
myrepo.git#mybranch | refs/heads/mybranch | /
myrepo.git#pull/42/head | refs/pull/42/head | /
myrepo.git#:myfolder | refs/heads/master | /myfolder
myrepo.git#master:myfolder | refs/heads/master | /myfolder
myrepo.git#mytag:myfolder | refs/tags/mytag | /myfolder
myrepo.git#mybranch:myfolder | refs/heads/mybranch | /myfolder

Note

You cannot specify the build-context directory (myfolder in the examples above) when using BuildKit as builder (DOCKER_BUILDKIT=1). Support for this feature is tracked in buildkit#1684.

Tarball contexts

If you pass a URL to a remote tarball, the URL itself is sent to the daemon:

$ docker build http://server/context.tar.gz
+

The download operation will be performed on the host the Docker daemon is running on, which is not necessarily the same host from which the build command is being issued. The Docker daemon will fetch context.tar.gz and use it as the build context. Tarball contexts must be tar archives conforming to the standard tar UNIX format and can be compressed with any one of the ‘xz’, ‘bzip2’, ‘gzip’ or ‘identity’ (no compression) formats.

Text files

Instead of specifying a context, you can pass a single Dockerfile in the URL or pipe the file in via STDIN. To pipe a Dockerfile from STDIN:

$ docker build - < Dockerfile
+

With Powershell on Windows, you can run:

Get-Content Dockerfile | docker build -
+

If you use STDIN or specify a URL pointing to a plain text file, the system places the contents into a file called Dockerfile, and any -f, --file option is ignored. In this scenario, there is no context.

By default the docker build command will look for a Dockerfile at the root of the build context. The -f, --file, option lets you specify the path to an alternative file to use instead. This is useful in cases where the same set of files are used for multiple builds. The path must be to a file within the build context. If a relative path is specified then it is interpreted as relative to the root of the context.
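
For example, a sketch of pointing the build at an alternative Dockerfile inside the context (the path and tag here are only illustrative):

$ docker build -f dockerfiles/Dockerfile.debug -t myapp:debug .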

In most cases, it’s best to put each Dockerfile in an empty directory. Then, add to that directory only the files needed for building the Dockerfile. To increase the build’s performance, you can exclude files and directories by adding a .dockerignore file to that directory as well. For information on creating one, see the .dockerignore file.

If the Docker client loses connection to the daemon, the build is canceled. This happens if you interrupt the Docker client with CTRL-c or if the Docker client is killed for any reason. If the build initiated a pull which is still running at the time the build is cancelled, the pull is cancelled as well.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
--build-arg Set build-time variables
--cache-from Images to consider as cache sources
--cgroup-parent Optional parent cgroup for the container
--compress Compress the build context using gzip
--cpu-period Limit the CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit the CPU CFS (Completely Fair Scheduler) quota
+--cpu-shares , -c + CPU shares (relative weight)
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--disable-content-trust true Skip image verification
+--file , -f + Name of the Dockerfile (Default is 'PATH/Dockerfile')
--force-rm Always remove intermediate containers
--iidfile Write the image ID to the file
--isolation Container isolation technology
--label Set metadata for an image
+--memory , -m + Memory limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--network Set the networking mode for the RUN instructions during build
--no-cache Do not use cache when building the image
--output , -o API 1.40+
Output destination (format: type=local,dest=path)
--platform API 1.40+
Set platform if server is multi-platform capable
--progress auto Set type of progress output (auto, plain, tty). Use plain to show container output
--pull Always attempt to pull a newer version of the image
--quiet , -q Suppress the build output and print image ID on success
--rm true Remove intermediate containers after a successful build
--secret Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret
--security-opt Security options
--shm-size Size of /dev/shm
--squash experimental (daemon)
Squash newly built layers into a single new layer
--ssh SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])
--stream Stream attaches to server to negotiate build context
--tag , -t Name and optionally a tag in the 'name:tag' format
--target Set the target build stage to build.
--ulimit Ulimit options

Examples

Build with PATH

$ docker build .
+
+Uploading context 10240 bytes
+Step 1/3 : FROM busybox
+Pulling repository busybox
+ ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/
+Step 2/3 : RUN ls -lh /
+ ---> Running in 9c9e81692ae9
+total 24
+drwxr-xr-x    2 root     root        4.0K Mar 12  2013 bin
+drwxr-xr-x    5 root     root        4.0K Oct 19 00:19 dev
+drwxr-xr-x    2 root     root        4.0K Oct 19 00:19 etc
+drwxr-xr-x    2 root     root        4.0K Nov 15 23:34 lib
+lrwxrwxrwx    1 root     root           3 Mar 12  2013 lib64 -> lib
+dr-xr-xr-x  116 root     root           0 Nov 15 23:34 proc
+lrwxrwxrwx    1 root     root           3 Mar 12  2013 sbin -> bin
+dr-xr-xr-x   13 root     root           0 Nov 15 23:34 sys
+drwxr-xr-x    2 root     root        4.0K Mar 12  2013 tmp
+drwxr-xr-x    2 root     root        4.0K Nov 15 23:34 usr
+ ---> b35f4035db3f
+Step 3/3 : CMD echo Hello world
+ ---> Running in 02071fceb21b
+ ---> f52f38b7823e
+Successfully built f52f38b7823e
+Removing intermediate container 9c9e81692ae9
+Removing intermediate container 02071fceb21b
+

This example specifies that the PATH is ., and so all the files in the local directory get tarred and sent to the Docker daemon. The PATH specifies where to find the files for the “context” of the build on the Docker daemon. Remember that the daemon could be running on a remote machine and that no parsing of the Dockerfile happens at the client side (where you’re running docker build). That means that all the files at PATH get sent, not just the ones referenced by ADD instructions in the Dockerfile.

The transfer of context from the local machine to the Docker daemon is what the docker client means when you see the “Sending build context” message.

If you wish to keep the intermediate containers after the build is complete, you must use --rm=false. This does not affect the build cache.

Build with URL

$ docker build github.com/creack/docker-firefox
+

This will clone the GitHub repository and use the cloned repository as context. The Dockerfile at the root of the repository is used as Dockerfile. You can specify an arbitrary Git repository by using the git:// or git@ scheme.

$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz
+
+Downloading context: http://server/ctx.tar.gz [===================>]    240 B/240 B
+Step 1/3 : FROM busybox
+ ---> 8c2e06607696
+Step 2/3 : ADD ctx/container.cfg /
+ ---> e7829950cee3
+Removing intermediate container b35224abf821
+Step 3/3 : CMD /bin/ls
+ ---> Running in fbc63d321d73
+ ---> 3286931702ad
+Removing intermediate container fbc63d321d73
+Successfully built 377c409b35e4
+

This sends the URL http://server/ctx.tar.gz to the Docker daemon, which downloads and extracts the referenced tarball. The -f ctx/Dockerfile parameter specifies a path inside ctx.tar.gz to the Dockerfile that is used to build the image. Any ADD commands in that Dockerfile that refer to local paths must be relative to the root of the contents inside ctx.tar.gz. In the example above, the tarball contains a directory ctx/, so the ADD ctx/container.cfg / operation works as expected.

Build with -

$ docker build - < Dockerfile
+

This will read a Dockerfile from STDIN without context. Due to the lack of a context, no contents of any local directory will be sent to the Docker daemon. Since there is no context, a Dockerfile ADD only works if it refers to a remote URL.
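
A build with no context can still use ADD with a remote source. A minimal sketch (the URL below is only an illustration):

FROM busybox
ADD https://example.com/config/app.conf /etc/app.conf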

$ docker build - < context.tar.gz
+

This will build an image for a compressed context read from STDIN. Supported formats are: bzip2, gzip and xz.

Use a .dockerignore file

$ docker build .
+
+Uploading context 18.829 MB
+Uploading context
+Step 1/2 : FROM busybox
+ ---> 769b9341d937
+Step 2/2 : CMD echo Hello world
+ ---> Using cache
+ ---> 99cc1ad10469
+Successfully built 99cc1ad10469
+$ echo ".git" > .dockerignore
+$ docker build .
+Uploading context  6.76 MB
+Uploading context
+Step 1/2 : FROM busybox
+ ---> 769b9341d937
+Step 2/2 : CMD echo Hello world
+ ---> Using cache
+ ---> 99cc1ad10469
+Successfully built 99cc1ad10469
+

This example shows the use of the .dockerignore file to exclude the .git directory from the context. Its effect can be seen in the changed size of the uploaded context. The builder reference contains detailed information on creating a .dockerignore file.

When using the BuildKit backend, docker build searches for a .dockerignore file relative to the Dockerfile name. For example, running docker build -f myapp.Dockerfile . will first look for an ignore file named myapp.Dockerfile.dockerignore. If such a file is not found, the .dockerignore file is used if present. Using a Dockerfile based .dockerignore is useful if a project contains multiple Dockerfiles that expect to ignore different sets of files.
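
As an illustration (the file names myapp.Dockerfile and myapp.Dockerfile.dockerignore are hypothetical), a BuildKit build that picks up a Dockerfile-specific ignore file could look like this:

$ echo "node_modules" > myapp.Dockerfile.dockerignore
$ DOCKER_BUILDKIT=1 docker build -f myapp.Dockerfile .

In this sketch, BuildKit excludes node_modules because the ignore file matches the Dockerfile name; a plain .dockerignore file, if present, would not be consulted for this build.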

Tag an image (-t)

$ docker build -t vieux/apache:2.0 .
+

This will build like the previous example, but it will then tag the resulting image. The repository name will be vieux/apache and the tag will be 2.0. Read more about valid tags.

You can apply multiple tags to an image. For example, you can apply the latest tag to a newly built image and add another tag that references a specific version. For example, to tag an image both as whenry/fedora-jboss:latest and whenry/fedora-jboss:v2.1, use the following:

$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 .
+

Specify a Dockerfile (-f)

$ docker build -f Dockerfile.debug .
+

This will use a file called Dockerfile.debug for the build instructions instead of Dockerfile.

$ curl example.com/remote/Dockerfile | docker build -f - .
+

The above command will use the current directory as the build context and read a Dockerfile from stdin.

$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug .
+$ docker build -f dockerfiles/Dockerfile.prod  -t myapp_prod .
+

The above commands will build the current build context (as specified by the .) twice, once using a debug version of a Dockerfile and once using a production version.

$ cd /home/me/myapp/some/dir/really/deep
+$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp
+$ docker build -f ../../../../dockerfiles/debug /home/me/myapp
+

These two docker build commands do the exact same thing. They both use the contents of the debug file instead of looking for a Dockerfile and will use /home/me/myapp as the root of the build context. Note that debug is in the directory structure of the build context, regardless of how you refer to it on the command line.

Note

docker build returns a no such file or directory error if the file or directory does not exist in the uploaded context. This may happen if there is no context, or if you specify a file that is elsewhere on the Host system. The context is limited to the current directory (and its children) for security reasons, and to ensure repeatable builds on remote Docker hosts. This is also the reason why ADD ../file does not work.

Use a custom parent cgroup (--cgroup-parent)

When docker build is run with the --cgroup-parent option the containers used in the build will be run with the corresponding docker run flag.
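
For example, a minimal sketch that places the build containers under an arbitrary parent cgroup named /docker-build:

$ docker build --cgroup-parent /docker-build .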

Set ulimits in container (--ulimit)

Using the --ulimit option with docker build will cause each build step’s container to be started using those --ulimit flag values.
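
For example, a sketch that raises the open-file limit for each build step's container (the values are arbitrary):

$ docker build --ulimit nofile=1024:1024 .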

Set build-time variables (--build-arg)

You can use ENV instructions in a Dockerfile to define variable values. These values persist in the built image. However, often persistence is not what you want. Users want to specify variables differently depending on which host they build an image on.

A good example is http_proxy or source versions for pulling intermediate files. The ARG instruction lets Dockerfile authors define values that users can set at build-time using the --build-arg flag:

$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 --build-arg FTP_PROXY=http://40.50.60.5:4567 .
+

This flag allows you to pass the build-time variables that are accessed like regular environment variables in the RUN instruction of the Dockerfile. Also, these values don’t persist in the intermediate or final images like ENV values do. You must add --build-arg for each build argument.
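
For instance, a minimal sketch of a Dockerfile that consumes the HTTP_PROXY build argument passed above (the echo step is purely illustrative):

FROM busybox
ARG HTTP_PROXY
RUN echo "building with proxy: $HTTP_PROXY"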

Using this flag will not alter the output you see when the ARG lines from the Dockerfile are echoed during the build process.

For detailed information on using ARG and ENV instructions, see the Dockerfile reference.

You may also use the --build-arg flag without a value, in which case the value from the local environment will be propagated into the Docker container being built:

$ export HTTP_PROXY=http://10.20.30.2:1234
+$ docker build --build-arg HTTP_PROXY .
+

This is similar to how docker run -e works. Refer to the docker run documentation for more information.

Optional security options (--security-opt)

This flag is only supported on a daemon running on Windows, and only supports the credentialspec option. The credentialspec must be in the format file://spec.txt or registry://keyname.

Specify isolation technology for container (--isolation)

This option is useful in situations where you are running Docker containers on Windows. The --isolation=<value> option sets a container’s isolation technology. On Linux, the only supported value is default, which uses Linux namespaces. On Microsoft Windows, you can specify these values:

Value Description
default Use the value specified by the Docker daemon’s --exec-opt option. If the daemon does not specify an isolation technology, Microsoft Windows uses process as its default value.
process Namespace isolation only.
hyperv Hyper-V hypervisor partition-based isolation.

Specifying the --isolation flag without a value is the same as setting --isolation="default".
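
For example, a sketch of requesting Hyper-V isolation for the build containers on a Windows daemon:

$ docker build --isolation=hyperv .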

Add entries to container hosts file (--add-host)

You can add other hosts into a container’s /etc/hosts file by using one or more --add-host flags. This example adds a static address for a host named docker:

$ docker build --add-host=docker:10.180.0.1 .
+

Specifying target build stage (--target)

When building a Dockerfile with multiple build stages, --target can be used to specify an intermediate build stage by name as a final stage for the resulting image. Commands after the target stage will be skipped.

FROM debian AS build-env
+# ...
+
+FROM alpine AS production-env
+# ...
+
$ docker build -t mybuildimage --target build-env .
+

Custom build outputs

By default, a local container image is created from the build result. The --output (or -o) flag allows you to override this behavior and specify a custom exporter. For example, custom exporters allow you to export the build artifacts as files on the local filesystem instead of a Docker image, which can be useful for generating local binaries, code generation, etc.

The value for --output is a CSV-formatted string defining the exporter type and options. Currently, local and tar exporters are supported. The local exporter writes the resulting build files to a directory on the client side. The tar exporter is similar but writes the files as a single tarball (.tar).

If no type is specified, the value defaults to the output directory of the local exporter. Use a hyphen (-) to write the output tarball to standard output (STDOUT).

The following example builds an image using the current directory (.) as build context, and exports the files to a directory named out in the current directory. If the directory does not exist, Docker creates the directory automatically:

$ docker build -o out .
+

The example above uses the short-hand syntax, omitting the type options, and thus uses the default (local) exporter. The example below shows the equivalent using the long-hand CSV syntax, specifying both type and dest (destination path):

$ docker build --output type=local,dest=out .
+

Use the tar type to export the files as a .tar archive:

$ docker build --output type=tar,dest=out.tar .
+

The example below shows the equivalent when using the short-hand syntax. In this case, - is specified as destination, which automatically selects the tar type, and writes the output tarball to standard output, which is then redirected to the out.tar file:

$ docker build -o - . > out.tar
+

The --output option exports all files from the target stage. A common pattern for exporting only specific files is to do multi-stage builds and to copy the desired files to a new scratch stage with COPY --from.

The example Dockerfile below uses a separate stage to collect the build-artifacts for exporting:

FROM golang AS build-stage
+RUN go get -u github.com/LK4D4/vndr
+
+FROM scratch AS export-stage
+COPY --from=build-stage /go/bin/vndr /
+

When building the Dockerfile with the -o option, only the files from the final stage are exported to the out directory, in this case, the vndr binary:

$ docker build -o out .
+
+[+] Building 2.3s (7/7) FINISHED
+ => [internal] load build definition from Dockerfile                                                                          0.1s
+ => => transferring dockerfile: 176B                                                                                          0.0s
+ => [internal] load .dockerignore                                                                                             0.0s
+ => => transferring context: 2B                                                                                               0.0s
+ => [internal] load metadata for docker.io/library/golang:latest                                                              1.6s
+ => [build-stage 1/2] FROM docker.io/library/golang@sha256:2df96417dca0561bf1027742dcc5b446a18957cd28eba6aa79269f23f1846d3f   0.0s
+ => => resolve docker.io/library/golang@sha256:2df96417dca0561bf1027742dcc5b446a18957cd28eba6aa79269f23f1846d3f               0.0s
+ => CACHED [build-stage 2/2] RUN go get -u github.com/LK4D4/vndr                                                              0.0s
+ => [export-stage 1/1] COPY --from=build-stage /go/bin/vndr /                                                                 0.2s
+ => exporting to client                                                                                                       0.4s
+ => => copying files 10.30MB                                                                                                  0.3s
+
+$ ls ./out
+vndr
+

Note

This feature requires the BuildKit backend. You can either enable BuildKit or use the buildx plugin which provides more output type options.

Specifying external cache sources

In addition to local build cache, the builder can reuse the cache generated from previous builds with the --cache-from flag pointing to an image in the registry.

To use an image as a cache source, cache metadata needs to be written into the image on creation. This can be done by setting --build-arg BUILDKIT_INLINE_CACHE=1 when building the image. After that, the built image can be used as a cache source for subsequent builds.

Upon importing the cache, the builder will only pull the JSON metadata from the registry and determine possible cache hits based on that information. If there is a cache hit, the matched layers are pulled into the local environment.

In addition to images, the cache can also be pulled from special cache manifests generated by buildx or the BuildKit CLI (buildctl). These manifests (when built with the type=registry and mode=max options) allow pulling layer data for intermediate stages in multi-stage builds.
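
A sketch of that workflow using the buildx plugin (the image and cache references are placeholders, and these flags belong to docker buildx build rather than docker build itself):

$ docker buildx build --cache-to type=registry,ref=myname/myapp:buildcache,mode=max -t myname/myapp --push .
$ docker buildx build --cache-from type=registry,ref=myname/myapp:buildcache -t myname/myapp .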

The following example builds an image with inline-cache metadata and pushes it to a registry, then uses the image as a cache source on another machine:

$ docker build -t myname/myapp --build-arg BUILDKIT_INLINE_CACHE=1 .
+$ docker push myname/myapp
+

After pushing the image, it can be used as a cache source on another machine. BuildKit automatically pulls the image from the registry if needed.

On another machine:

$ docker build --cache-from myname/myapp .
+

Note

This feature requires the BuildKit backend. You can either enable BuildKit or use the buildx plugin. The previous builder has limited support for reusing cache from pre-pulled images.

Squash an image’s layers (--squash) (experimental)

Overview

Once the image is built, squash the new layers into a new image with a single new layer. Squashing does not destroy any existing image, rather it creates a new image with the content of the squashed layers. This effectively makes it look like all Dockerfile commands were created with a single layer. The build cache is preserved with this method.

The --squash option is an experimental feature, and should not be considered stable.

Squashing layers can be beneficial if your Dockerfile produces multiple layers modifying the same files, for example, files that are created in one step, and removed in another step. For other use-cases, squashing images may actually have a negative impact on performance; when pulling an image consisting of multiple layers, layers can be pulled in parallel, and allows sharing layers between images (saving space).

For most use cases, multi-stage builds are a better alternative, as they give more fine-grained control over your build, and can take advantage of future optimizations in the builder. Refer to the use multi-stage builds section in the userguide for more information.

Known limitations

The --squash option has a number of known limitations:

Prerequisites

The example on this page is using experimental mode in Docker 19.03.

Experimental mode can be enabled by using the --experimental flag when starting the Docker daemon or setting experimental: true in the daemon.json configuration file.

By default, experimental mode is disabled. To see the current configuration of the docker daemon, use the docker version command and check the Experimental line in the Engine section:

Client: Docker Engine - Community
+ Version:           19.03.8
+ API version:       1.40
+ Go version:        go1.12.17
+ Git commit:        afacb8b
+ Built:             Wed Mar 11 01:21:11 2020
+ OS/Arch:           darwin/amd64
+ Experimental:      false
+
+Server: Docker Engine - Community
+ Engine:
+  Version:          19.03.8
+  API version:      1.40 (minimum version 1.12)
+  Go version:       go1.12.17
+  Git commit:       afacb8b
+  Built:            Wed Mar 11 01:29:16 2020
+  OS/Arch:          linux/amd64
+  Experimental:     true
+ [...]
+

To enable experimental mode, users need to restart the docker daemon with the experimental flag enabled.

Enable Docker experimental

To enable experimental features, you need to start the Docker daemon with the --experimental flag. You can also enable the daemon flag via /etc/docker/daemon.json, for example:

{
+    "experimental": true
+}
+

Then make sure the experimental flag is enabled:

$ docker version -f '{{.Server.Experimental}}'
+true
+

Build an image with --squash argument

The following is an example of a build with the --squash argument. First, the Dockerfile:

FROM busybox
+RUN echo hello > /hello
+RUN echo world >> /hello
+RUN touch remove_me /remove_me
+ENV HELLO=world
+RUN rm /remove_me
+

An image named test is built with the --squash argument:

$ docker build --squash -t test .
+
+<...>
+

If everything is right, the history looks like this:

$ docker history test
+
+IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+4e10cb5b4cac        3 seconds ago                                                       12 B                merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb
+<missing>           5 minutes ago       /bin/sh -c rm /remove_me                        0 B
+<missing>           5 minutes ago       /bin/sh -c #(nop) ENV HELLO=world               0 B
+<missing>           5 minutes ago       /bin/sh -c touch remove_me /remove_me           0 B
+<missing>           5 minutes ago       /bin/sh -c echo world >> /hello                 0 B
+<missing>           6 minutes ago       /bin/sh -c echo hello > /hello                  0 B
+<missing>           7 weeks ago         /bin/sh -c #(nop) CMD ["sh"]                    0 B
+<missing>           7 weeks ago         /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff   1.113 MB
+

Note that all of the original layers now show as <missing>, and that there is a new layer with the COMMENT merge.

Test the image, check for /remove_me being gone, make sure hello\nworld is in /hello, make sure the HELLO environment variable’s value is world.
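
A quick way to verify those three points, assuming the test image built above:

$ docker run --rm test ls /remove_me
ls: /remove_me: No such file or directory
$ docker run --rm test cat /hello
hello
world
$ docker run --rm test env | grep HELLO
HELLO=world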

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/build/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder%2Findex.html new file mode 100644 index 00000000..922282c4 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder%2Findex.html @@ -0,0 +1,7 @@ +

docker builder


Manage builds

Usage

$ docker builder COMMAND
+

Child commands

Command Description
docker builder build Build an image from a Dockerfile
docker builder prune Remove build cache
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/builder/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_build%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_build%2Findex.html new file mode 100644 index 00000000..4e6dce75 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_build%2Findex.html @@ -0,0 +1,22 @@ +

docker builder build


Build an image from a Dockerfile

Usage

$ docker builder build [OPTIONS] PATH | URL | -
+

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
--build-arg Set build-time variables
--cache-from Images to consider as cache sources
--cgroup-parent Optional parent cgroup for the container
--compress Compress the build context using gzip
--cpu-period Limit the CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit the CPU CFS (Completely Fair Scheduler) quota
--cpu-shares , -c CPU shares (relative weight)
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--disable-content-trust true Skip image verification
--file , -f Name of the Dockerfile (Default is 'PATH/Dockerfile')
--force-rm Always remove intermediate containers
--iidfile Write the image ID to the file
--isolation Container isolation technology
--label Set metadata for an image
--memory , -m Memory limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--network Set the networking mode for the RUN instructions during build
--no-cache Do not use cache when building the image
--output , -o API 1.40+
Output destination (format: type=local,dest=path)
--platform API 1.40+
Set platform if server is multi-platform capable
--progress auto Set type of progress output (auto, plain, tty). Use plain to show container output
--pull Always attempt to pull a newer version of the image
--quiet , -q Suppress the build output and print image ID on success
--rm true Remove intermediate containers after a successful build
--secret Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret
--security-opt Security options
--shm-size Size of /dev/shm
--squash experimental (daemon)
Squash newly built layers into a single new layer
--ssh SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])
--stream Stream attaches to server to negotiate build context
--tag , -t Name and optionally a tag in the 'name:tag' format
--target Set the target build stage to build.
--ulimit Ulimit options

Parent command

Command Description
docker builder Manage builds
Command Description
docker builder build Build an image from a Dockerfile
docker builder prune Remove build cache
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/builder_build/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_prune%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_prune%2Findex.html new file mode 100644 index 00000000..44887efe --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fbuilder_prune%2Findex.html @@ -0,0 +1,11 @@ +

docker builder prune


Remove build cache

Usage

$ docker builder prune
+

Options

Name, shorthand Default Description
--all , -a Remove all unused build cache, not just dangling ones
--filter Provide filter values (e.g. 'until=24h')
--force , -f Do not prompt for confirmation
--keep-storage Amount of disk space to keep for cache

Parent command

Command Description
docker builder Manage builds
Command Description
docker builder build Build an image from a Dockerfile
docker builder prune Remove build cache
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/builder_prune/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint%2Findex.html new file mode 100644 index 00000000..fc824e94 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint%2Findex.html @@ -0,0 +1,30 @@ +

docker checkpoint


Manage checkpoints

This command is experimental.

This command is experimental on the Docker daemon. It should not be used in production environments. To enable experimental features on the Docker daemon, edit the daemon.json and set experimental to true.

Experimental features provide early access to future product functionality. These features are intended for testing and feedback only as they may change between releases without warning or can be removed entirely from a future release. Experimental features must not be used in production environments. Docker does not offer support for experimental features.

For a list of current experimental features in the Docker CLI, see Docker CLI Experimental features.

Usage

$ docker checkpoint COMMAND
+

Description

Checkpoint and Restore is an experimental feature that allows you to freeze a running container by checkpointing it, which turns its state into a collection of files on disk. Later, the container can be restored from the point it was frozen.

This is accomplished using a tool called CRIU, which is an external dependency of this feature. A good overview of the history of checkpoint and restore in Docker is available in this Kubernetes blog post.

Installing CRIU

If you use a Debian system, you can add the CRIU PPA and install with apt-get from the criu launchpad.
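
A sketch of that route on an Ubuntu-style system (the criu launchpad PPA name is assumed; adjust for your distribution):

$ sudo add-apt-repository ppa:criu/ppa
$ sudo apt-get update
$ sudo apt-get install criu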

Alternatively, you can build CRIU from source.

You need at least version 2.0 of CRIU to run checkpoint and restore in Docker.

Use cases for checkpoint and restore

This feature is currently focused on single-host use cases for checkpoint and restore. Here are a few:

Another primary use case of checkpoint and restore outside of Docker is the live migration of a server from one machine to another. This is possible with the current implementation, but not currently a priority (and so the workflow is not optimized for the task).

Using checkpoint and restore

A new top level command docker checkpoint is introduced, with three subcommands: create, ls, and rm.

Additionally, a --checkpoint flag is added to the docker container start command.

The options for docker checkpoint create:

Usage:  docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT
+
+Create a checkpoint from a running container
+
+  --leave-running=false    Leave the container running after checkpoint
+  --checkpoint-dir         Use a custom checkpoint storage directory
+

And to restore a container:

Usage:  docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER
+

Example of using checkpoint and restore on a container:

$ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done'
+abc0123
+
+$ docker checkpoint create cr checkpoint1
+
+# <later>
+$ docker start --checkpoint checkpoint1 cr
+abc0123
+

This process just logs an incrementing counter to stdout. If you run docker logs in between running/checkpoint/restoring you should see that the counter increases while the process is running, stops while it’s checkpointed, and resumes from the point it left off once you restore.
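
For example, a quick check using the cr container from above:

$ docker logs --tail 5 cr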

Known limitations

seccomp is only supported by CRIU in very up-to-date kernels.

External terminal (i.e. docker run -t ..) is not supported at the moment. If you try to create a checkpoint for a container with an external terminal, it fails:

$ docker checkpoint create cr checkpoint1
+Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n"
+
+$ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log
+Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount
+

Child commands

Command Description
docker checkpoint create Create a checkpoint from a running container
docker checkpoint ls List checkpoints for a container
docker checkpoint rm Remove a checkpoint
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/checkpoint/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_create%2Findex.html new file mode 100644 index 00000000..5a965954 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_create%2Findex.html @@ -0,0 +1,7 @@ +

docker checkpoint create


Create a checkpoint from a running container

This command is experimental.

This command is experimental on the Docker daemon. It should not be used in production environments. To enable experimental features on the Docker daemon, edit the daemon.json and set experimental to true.

Experimental features provide early access to future product functionality. These features are intended for testing and feedback only as they may change between releases without warning or can be removed entirely from a future release. Experimental features must not be used in production environments. Docker does not offer support for experimental features.

For a list of current experimental features in the Docker CLI, see Docker CLI Experimental features.

Usage

$ docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT
+

Options

Name, shorthand Default Description
--checkpoint-dir Use a custom checkpoint storage directory
--leave-running Leave the container running after checkpoint

Parent command

Command Description
docker checkpoint Manage checkpoints
Command Description
docker checkpoint create Create a checkpoint from a running container
docker checkpoint ls List checkpoints for a container
docker checkpoint rm Remove a checkpoint
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/checkpoint_create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_ls%2Findex.html new file mode 100644 index 00000000..013b15ea --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_ls%2Findex.html @@ -0,0 +1,7 @@ +

docker checkpoint ls


List checkpoints for a container

This command is experimental.

This command is experimental on the Docker daemon. It should not be used in production environments. To enable experimental features on the Docker daemon, edit the daemon.json and set experimental to true.

Experimental features provide early access to future product functionality. These features are intended for testing and feedback only as they may change between releases without warning or can be removed entirely from a future release. Experimental features must not be used in production environments. Docker does not offer support for experimental features.

For a list of current experimental features in the Docker CLI, see Docker CLI Experimental features.

Usage

$ docker checkpoint ls [OPTIONS] CONTAINER
+

Options

Name, shorthand Default Description
--checkpoint-dir Use a custom checkpoint storage directory

Parent command

Command Description
docker checkpoint Manage checkpoints
Command Description
docker checkpoint create Create a checkpoint from a running container
docker checkpoint ls List checkpoints for a container
docker checkpoint rm Remove a checkpoint
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/checkpoint_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_rm%2Findex.html new file mode 100644 index 00000000..7cefd72c --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcheckpoint_rm%2Findex.html @@ -0,0 +1,7 @@ +

docker checkpoint rm


Remove a checkpoint

This command is experimental.

This command is experimental on the Docker daemon. It should not be used in production environments. To enable experimental features on the Docker daemon, edit the daemon.json and set experimental to true.

Experimental features provide early access to future product functionality. These features are intended for testing and feedback only as they may change between releases without warning or can be removed entirely from a future release. Experimental features must not be used in production environments. Docker does not offer support for experimental features.

For a list of current experimental features in the Docker CLI, see Docker CLI Experimental features.

Usage

$ docker checkpoint rm [OPTIONS] CONTAINER CHECKPOINT
+

Options

Name, shorthand Default Description
--checkpoint-dir Use a custom checkpoint storage directory

Parent command

Command Description
docker checkpoint Manage checkpoints
Command Description
docker checkpoint create Create a checkpoint from a running container
docker checkpoint ls List checkpoints for a container
docker checkpoint rm Remove a checkpoint
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/checkpoint_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcli%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcli%2Findex.html new file mode 100644 index 00000000..c7e152f7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcli%2Findex.html @@ -0,0 +1,104 @@ +

Use the Docker command line

docker

To list available commands, either run docker with no parameters or execute docker help:

$ docker
+Usage: docker [OPTIONS] COMMAND [ARG...]
+       docker [ --help | -v | --version ]
+
+A self-sufficient runtime for containers.
+
+Options:
+      --config string      Location of client config files (default "/root/.docker")
+  -c, --context string     Name of the context to use to connect to the daemon (overrides DOCKER_HOST env var and default context set with "docker context use")
+  -D, --debug              Enable debug mode
+      --help               Print usage
+  -H, --host value         Daemon socket(s) to connect to (default [])
+  -l, --log-level string   Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info")
+      --tls                Use TLS; implied by --tlsverify
+      --tlscacert string   Trust certs signed only by this CA (default "/root/.docker/ca.pem")
+      --tlscert string     Path to TLS certificate file (default "/root/.docker/cert.pem")
+      --tlskey string      Path to TLS key file (default "/root/.docker/key.pem")
+      --tlsverify          Use TLS and verify the remote
+  -v, --version            Print version information and quit
+
+Commands:
+    attach    Attach to a running container
+    # […]
+

Description

Depending on your Docker system configuration, you may be required to preface each docker command with sudo. To avoid having to use sudo with the docker command, your system administrator can create a Unix group called docker and add users to it.
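
A typical sketch of that setup (the group change takes effect after logging out and back in):

$ sudo groupadd docker
$ sudo usermod -aG docker $USER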

For more information about installing Docker or sudo configuration, refer to the installation instructions for your operating system.

Environment variables

The following environment variables are supported by the docker command line:

Variable Description
DOCKER_API_VERSION Override the negotiated API version to use for debugging (e.g. 1.19)
DOCKER_CERT_PATH Location of your authentication keys. This variable is used both by the docker CLI and the dockerd daemon
DOCKER_CONFIG The location of your client configuration files.
DOCKER_CONTENT_TRUST_SERVER The URL of the Notary server to use. Defaults to the same URL as the registry.
DOCKER_CONTENT_TRUST When set Docker uses notary to sign and verify images. Equates to --disable-content-trust=false for build, create, pull, push, run.
DOCKER_CONTEXT Name of the docker context to use (overrides DOCKER_HOST env var and default context set with docker context use)
DOCKER_DEFAULT_PLATFORM Default platform for commands that take the --platform flag.
DOCKER_HIDE_LEGACY_COMMANDS When set, Docker hides “legacy” top-level commands (such as docker rm, and docker pull) in docker help output, and only Management commands per object-type (e.g., docker container) are printed. This may become the default in a future release, at which point this environment-variable is removed.
DOCKER_HOST Daemon socket to connect to.
DOCKER_STACK_ORCHESTRATOR Configure the default orchestrator to use when using docker stack management commands.
DOCKER_TLS_VERIFY When set Docker uses TLS and verifies the remote. This variable is used both by the docker CLI and the dockerd daemon
BUILDKIT_PROGRESS Set type of progress output (auto, plain, tty) when building with BuildKit backend. Use plain to show container output (default auto).

Because Docker is developed using Go, you can also use any environment variables used by the Go runtime. In particular, you may find these useful:

These Go environment variables are case-insensitive. See the Go specification for details on these variables.

Configuration files

By default, the Docker command line stores its configuration files in a directory called .docker within your $HOME directory.

Docker manages most of the files in the configuration directory and you should not modify them. However, you can modify the config.json file to control certain aspects of how the docker command behaves.

You can modify the docker command behavior using environment variables or command-line options. You can also use options within config.json to modify some of the same behavior. If an environment variable and the --config flag are set, the flag takes precedence over the environment variable. Command line options override environment variables, and environment variables override properties you specify in a config.json file.

Change the .docker directory

To specify a different directory, use the DOCKER_CONFIG environment variable or the --config command line option. If both are specified, then the --config option overrides the DOCKER_CONFIG environment variable. The example below overrides the docker ps command using a config.json file located in the ~/testconfigs/ directory.

$ docker --config ~/testconfigs/ ps
+

This flag only applies to whatever command is being run. For persistent configuration, you can set the DOCKER_CONFIG environment variable in your shell (e.g. ~/.profile or ~/.bashrc). The example below sets the new directory to be $HOME/newdir/.docker.

$ echo export DOCKER_CONFIG=$HOME/newdir/.docker > ~/.profile
+

Docker CLI configuration file (config.json) properties

Use the Docker CLI configuration to customize settings for the docker CLI. The configuration file uses JSON formatting, and the properties you can set are described below.

By default, the configuration file is stored in ~/.docker/config.json. Refer to the change the .docker directory section to use a different location.

Warning

The configuration file and other files inside the ~/.docker configuration directory may contain sensitive information, such as authentication information for proxies or, depending on your credential store, credentials for your image registries. Review your configuration file’s content before sharing with others, and prevent committing the file to version control.

Customize the default output format for commands

These fields allow you to customize the default output format for some commands if no --format flag is provided.

Property Description
configFormat Custom default format for docker config ls output. Refer to the format the output section in the docker config ls documentation for a list of supported formatting directives.
imagesFormat Custom default format for docker images / docker image ls output. Refer to the format the output section in the docker images documentation for a list of supported formatting directives.
nodesFormat Custom default format for docker node ls output. Refer to the formatting section in the docker node ls documentation for a list of supported formatting directives.
pluginsFormat Custom default format for docker plugin ls output. Refer to the formatting section in the docker plugin ls documentation for a list of supported formatting directives.
psFormat Custom default format for docker ps / docker container ps output. Refer to the formatting section in the docker ps documentation for a list of supported formatting directives.
secretFormat Custom default format for docker secret ls output. Refer to the format the output section in the docker secret ls documentation for a list of supported formatting directives.
serviceInspectFormat Custom default format for docker service inspect output. Refer to the formatting section in the docker service inspect documentation for a list of supported formatting directives.
servicesFormat Custom default format for docker service ls output. Refer to the formatting section in the docker service ls documentation for a list of supported formatting directives.
statsFormat Custom default format for docker stats output. Refer to the formatting section in the docker stats documentation for a list of supported formatting directives.

Custom HTTP headers

The property HttpHeaders specifies a set of headers to include in all messages sent from the Docker client to the daemon. Docker does not try to interpret or understand these headers; it simply puts them into the messages. Docker does not allow these headers to change any headers it sets for itself.

Credential store options

The property credsStore specifies an external binary to serve as the default credential store. When this property is set, docker login will attempt to store credentials in the binary specified by docker-credential-<value> which is visible on $PATH. If this property is not set, credentials will be stored in the auths property of the config. For more information, see the Credentials store section in the docker login documentation

The property credHelpers specifies a set of credential helpers to use preferentially over credsStore or auths when storing and retrieving credentials for specific registries. If this property is set, the binary docker-credential-<value> will be used when storing or retrieving credentials for a specific registry. For more information, see the Credential helpers section in the docker login documentation

Orchestrator options for docker stacks

The property stackOrchestrator specifies the default orchestrator to use when running docker stack management commands. Valid values are "swarm", "kubernetes", and "all". This property can be overridden with the DOCKER_STACK_ORCHESTRATOR environment variable, or the --orchestrator flag.

Automatic proxy configuration for containers

The property proxies specifies proxy environment variables to be automatically set on containers, and set as --build-arg on containers used during docker build. A "default" set of proxies can be configured, and will be used for any docker daemon that the client connects to, or a configuration per host (docker daemon), for example, “https://docker-daemon1.example.com”. The following properties can be set for each environment:

Property Description
httpProxy Default value of HTTP_PROXY and http_proxy for containers, and as --build-arg on docker build
httpsProxy Default value of HTTPS_PROXY and https_proxy for containers, and as --build-arg on docker build
ftpProxy Default value of FTP_PROXY and ftp_proxy for containers, and as --build-arg on docker build
noProxy Default value of NO_PROXY and no_proxy for containers, and as --build-arg on docker build

These settings are used to configure proxy settings for containers only, and are not used as proxy settings for the docker CLI or the dockerd daemon. Refer to the environment variables and HTTP/HTTPS proxy sections for configuring proxy settings for the CLI and daemon.

Warning

Proxy settings may contain sensitive information (for example, if the proxy requires authentication). Environment variables are stored as plain text in the container’s configuration, and as such can be inspected through the remote API or committed to an image when using docker commit.

Default key-sequence to detach from containers

Once attached to a container, users detach from it and leave it running using the CTRL-p CTRL-q key sequence. This detach key sequence is customizable using the detachKeys property. Specify a <sequence> value for the property. The format of the <sequence> is a comma-separated list of either a letter [a-Z], or the ctrl- combined with any of the following:

Your customization applies to all containers started with your Docker client. Users can override your custom or the default key sequence on a per-container basis. To do this, the user specifies the --detach-keys flag with the docker attach, docker exec, docker run or docker start command.
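
For example, a sketch of a per-container override (the container name and key sequence are arbitrary):

$ docker attach --detach-keys="ctrl-x,x" my-container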

CLI Plugin options

The property plugins contains settings specific to CLI plugins. The key is the plugin name, while the value is a further map of options, which are specific to that plugin.

Sample configuration file

Following is a sample config.json file to illustrate the format used for various fields:

+{
+  "HttpHeaders": {
+    "MyHeader": "MyValue"
+  },
+  "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}",
+  "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}",
+  "pluginsFormat": "table {{.ID}}\t{{.Name}}\t{{.Enabled}}",
+  "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}",
+  "servicesFormat": "table {{.ID}}\t{{.Name}}\t{{.Mode}}",
+  "secretFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}",
+  "configFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}",
+  "serviceInspectFormat": "pretty",
+  "nodesFormat": "table {{.ID}}\t{{.Hostname}}\t{{.Availability}}",
+  "detachKeys": "ctrl-e,e",
+  "credsStore": "secretservice",
+  "credHelpers": {
+    "awesomereg.example.org": "hip-star",
+    "unicorn.example.com": "vcbait"
+  },
+  "stackOrchestrator": "kubernetes",
+  "plugins": {
+    "plugin1": {
+      "option": "value"
+    },
+    "plugin2": {
+      "anotheroption": "anothervalue",
+      "athirdoption": "athirdvalue"
+    }
+  },
+  "proxies": {
+    "default": {
+      "httpProxy":  "http://user:pass@example.com:3128",
+      "httpsProxy": "https://my-proxy.example.com:3129",
+      "noProxy":    "intra.mycorp.example.com",
+      "ftpProxy":   "http://user:pass@example.com:3128"
+    },
+    "https://manager1.mycorp.example.com:2377": {
+      "httpProxy":  "http://user:pass@example.com:3128",
+      "httpsProxy": "https://my-proxy.example.com:3129"
+    }
+  }
+}
+
+

Experimental features

Experimental features provide early access to future product functionality. These features are intended for testing and feedback, and they may change between releases without warning or can be removed from a future release.

Starting with Docker 20.10, experimental CLI features are enabled by default, and require no configuration to enable them.

Notary

If using your own notary server and a self-signed certificate or an internal Certificate Authority, you need to place the certificate at tls/<registry_url>/ca.crt in your docker config directory.
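
A sketch of that layout, using a placeholder registry host:

$ mkdir -p ~/.docker/tls/my-registry.example.com:4443
$ cp ca.crt ~/.docker/tls/my-registry.example.com:4443/ca.crt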

Alternatively you can trust the certificate globally by adding it to your system’s list of root Certificate Authorities.

Examples

Display help text

To list the help on any command just execute the command, followed by the --help option.

$ docker run --help
+
+Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+
+Run a command in a new container
+
+Options:
+      --add-host value             Add a custom host-to-IP mapping (host:ip) (default [])
+  -a, --attach value               Attach to STDIN, STDOUT or STDERR (default [])
+<...>
+

Option types

Single character command line options can be combined, so rather than typing docker run -i -t --name test busybox sh, you can write docker run -it --name test busybox sh.

Boolean

Boolean options take the form -d=false. The value you see in the help text is the default value which is set if you do not specify that flag. If you specify a Boolean flag without a value, this will set the flag to true, irrespective of the default value.

For example, running docker run -d will set the value to true, so your container will run in “detached” mode, in the background.

Options which default to true (e.g., docker build --rm=true) can only be set to the non-default value by explicitly setting them to false:

$ docker build --rm=false .
+

Multi

You can specify options like -a=[] multiple times in a single command line, for example in these commands:

$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
+
+$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls
+

Sometimes, multiple options can call for a more complex value string as for -v:

$ docker run -v /host:/container example/mysql
+

Note

Do not use the -t and -a stderr options together due to limitations in the pty implementation. All stderr in pty mode simply goes to stdout.

Strings and Integers

Options like --name="" expect a string, and they can only be specified once. Options like -c=0 expect an integer, and they can only be specified once.

+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/cli/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcommit%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcommit%2Findex.html new file mode 100644 index 00000000..f5402e6f --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcommit%2Findex.html @@ -0,0 +1,66 @@ +

docker commit


Create a new image from a container’s changes

Usage

$ docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

It can be useful to commit a container’s file changes or settings into a new image. This allows you to debug a container by running an interactive shell, or to export a working dataset to another server. Generally, it is better to use Dockerfiles to manage your images in a documented and maintainable way. Read more about valid image names and tags.

The commit operation will not include any data contained in volumes mounted inside the container.

By default, the container being committed and its processes will be paused while the image is committed. This reduces the likelihood of encountering data corruption during the process of creating the commit. If this behavior is undesired, set the --pause option to false.
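
For example, a sketch that commits without pausing the container (the container ID is taken from the examples below):

$ docker commit --pause=false c3f279d17e0a svendowideit/testimage:version3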

The --change option will apply Dockerfile instructions to the image that is created. Supported Dockerfile instructions: CMD|ENTRYPOINT|ENV|EXPOSE|LABEL|ONBUILD|USER|VOLUME|WORKDIR

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--author , -a Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
--change , -c Apply Dockerfile instruction to the created image
--message , -m Commit message
--pause , -p true Pause container during commit

Examples

Commit a container

$ docker ps
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS              NAMES
+c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                            desperate_dubinsky
+197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                            focused_hamilton
+
+$ docker commit c3f279d17e0a  svendowideit/testimage:version3
+
+f5283438590d
+
+$ docker images
+
+REPOSITORY                        TAG                 ID                  CREATED             SIZE
+svendowideit/testimage            version3            f5283438590d        16 seconds ago      335.7 MB
+

Commit a container with new configurations

$ docker ps
+
+CONTAINER ID       IMAGE               COMMAND             CREATED             STATUS              PORTS              NAMES
+c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                            desperate_dubinsky
+197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                            focused_hamilton
+
+$ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a
+
+[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin]
+
+$ docker commit --change "ENV DEBUG=true" c3f279d17e0a  svendowideit/testimage:version3
+
+f5283438590d
+
+$ docker inspect -f "{{ .Config.Env }}" f5283438590d
+
+[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true]
+

Commit a container with new CMD and EXPOSE instructions

$ docker ps
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS              NAMES
+c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                            desperate_dubinsky
+197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                            focused_hamilton
+
+$ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a  svendowideit/testimage:version4
+
+f5283438590d
+
+$ docker run -d svendowideit/testimage:version4
+
+89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0
+
+$ docker ps
+
+CONTAINER ID        IMAGE               COMMAND                 CREATED             STATUS              PORTS              NAMES
+89373736e2e7        testimage:version4  "apachectl -DFOREGROU"  3 seconds ago       Up 2 seconds        80/tcp             distracted_fermat
+c3f279d17e0a        ubuntu:12.04        /bin/bash               7 days ago          Up 25 hours                            desperate_dubinsky
+197387f1b436        ubuntu:12.04        /bin/bash               7 days ago          Up 25 hours                            focused_hamilton
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/commit/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig%2Findex.html new file mode 100644 index 00000000..09aa3f55 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig%2Findex.html @@ -0,0 +1,7 @@ +

docker config


Manage Docker configs

Swarm This command works with the Swarm orchestrator.

Usage

$ docker config COMMAND
+

Description

Manage configs.

Child commands

Command Description
docker config create Create a config from a file or STDIN
docker config inspect Display detailed information on one or more configs
docker config ls List configs
docker config rm Remove one or more configs

More info

Store configuration data using Docker Configs

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/config/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_create%2Findex.html new file mode 100644 index 00000000..8ba5438f --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_create%2Findex.html @@ -0,0 +1,51 @@ +

docker config create


Create a config from a file or STDIN

Swarm This command works with the Swarm orchestrator.

Usage

$ docker config create [OPTIONS] CONFIG file|-
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a config from a file, or from standard input if - is given in place of a file name, using the supplied data as the config content.

For detailed information about using configs, refer to store configuration data using Docker Configs.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--label , -l Config labels
--template-driver Template driver

Examples

Create a config

$ printf <config> | docker config create my_config -
+
+onakdyv307se2tl7nl20anokv
+
+$ docker config ls
+
+ID                          NAME                CREATED             UPDATED
+onakdyv307se2tl7nl20anokv   my_config           6 seconds ago       6 seconds ago
+

Create a config with a file

$ docker config create my_config ./config.json
+
+dg426haahpi5ezmkkj5kyl3sn
+
+$ docker config ls
+
+ID                          NAME                CREATED             UPDATED
+dg426haahpi5ezmkkj5kyl3sn   my_config           7 seconds ago       7 seconds ago
+

Create a config with labels

$ docker config create \
+    --label env=dev \
+    --label rev=20170324 \
+    my_config ./config.json
+
+eo7jnzguqgtpdah3cm5srfb97
+
$ docker config inspect my_config
+
+[
+    {
+        "ID": "eo7jnzguqgtpdah3cm5srfb97",
+        "Version": {
+            "Index": 17
+        },
+        "CreatedAt": "2017-03-24T08:15:09.735271783Z",
+        "UpdatedAt": "2017-03-24T08:15:09.735271783Z",
+        "Spec": {
+            "Name": "my_config",
+            "Labels": {
+                "env": "dev",
+                "rev": "20170324"
+            },
+            "Data": "aGVsbG8K"
+        }
+    }
+]
+

Parent command

Command Description
docker config Manage Docker configs
Command Description
docker config create Create a config from a file or STDIN
docker config inspect Display detailed information on one or more configs
docker config ls List configs
docker config rm Remove one or more configs
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/config_create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_inspect%2Findex.html new file mode 100644 index 00000000..eca63d17 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_inspect%2Findex.html @@ -0,0 +1,35 @@ +

docker config inspect


Display detailed information on one or more configs

Swarm This command works with the Swarm orchestrator.

Usage

$ docker config inspect [OPTIONS] CONFIG [CONFIG...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Inspects the specified config.

By default, this renders all results in a JSON array. If a format is specified, the given template will be executed for each result.

Go’s text/template package describes all the details of the format.

For detailed information about using configs, refer to store configuration data using Docker Configs.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template
--pretty Print the information in a human friendly format

Examples

Inspect a config by name or ID

You can inspect a config either by its name or by its ID.

For example, given the following config:

$ docker config ls
+
+ID                          NAME                CREATED             UPDATED
+eo7jnzguqgtpdah3cm5srfb97   my_config           3 minutes ago       3 minutes ago
+
$ docker config inspect my_config
+

The output is in JSON format, for example:

[
+  {
+    "ID": "eo7jnzguqgtpdah3cm5srfb97",
+    "Version": {
+      "Index": 17
+    },
+    "CreatedAt": "2017-03-24T08:15:09.735271783Z",
+    "UpdatedAt": "2017-03-24T08:15:09.735271783Z",
+    "Spec": {
+      "Name": "my_config",
+      "Labels": {
+        "env": "dev",
+        "rev": "20170324"
+      },
+      "Data": "aGVsbG8K"
+    }
+  }
+]
+

Formatting

You can use the --format option to obtain specific information about a config. The following example command outputs the creation time of the config.

$ docker config inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97
+
+2017-03-24 08:15:09.735271783 +0000 UTC
+
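
The --pretty flag listed in the options above renders the same details in a human-friendly layout instead of JSON; a minimal sketch using the config from this page:

$ docker config inspect --pretty my_config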

Parent command

Command Description
docker config Manage Docker configs
Command Description
docker config create Create a config from a file or STDIN
docker config inspect Display detailed information on one or more configs
docker config ls List configs
docker config rm Remove one or more configs
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/config_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_ls%2Findex.html new file mode 100644 index 00000000..4715cbcb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_ls%2Findex.html @@ -0,0 +1,48 @@ +

docker config ls


List configs

Swarm This command works with the Swarm orchestrator.

Usage

$ docker config ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Run this command on a manager node to list the configs in the swarm.

For detailed information about using configs, refer to store configuration data using Docker Configs.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter , -f Filter output based on conditions provided
--format Pretty-print configs using a Go template
--quiet , -q Only display IDs

Examples

$ docker config ls
+
+ID                          NAME                        CREATED             UPDATED
+6697bflskwj1998km1gnnjr38   q5s5570vtvnimefos1fyeo2u2   6 weeks ago         6 weeks ago
+9u9hk4br2ej0wgngkga6rp4hq   my_config                   5 weeks ago         5 weeks ago
+mem02h8n73mybpgqjf0kfi1n0   test_config                 3 seconds ago       3 seconds ago
+

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz")

The currently supported filters are:

id

The id filter matches all or a prefix of a config's ID.

$ docker config ls -f "id=6697bflskwj1998km1gnnjr38"
+
+ID                          NAME                        CREATED             UPDATED
+6697bflskwj1998km1gnnjr38   q5s5570vtvnimefos1fyeo2u2   6 weeks ago         6 weeks ago
+

label

The label filter matches configs based on the presence of a label alone or a label and a value.

The following filter matches all configs with a project label regardless of its value:

$ docker config ls --filter label=project
+
+ID                          NAME                        CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_config                 About an hour ago   About an hour ago
+

The following filter matches only configs whose project label has the value test:

$ docker config ls --filter label=project=test
+
+ID                          NAME                        CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_config                 About an hour ago   About an hour ago
+

name

The name filter matches all or a prefix of a config's name.

The following filter matches configs whose name starts with the prefix test.

$ docker config ls --filter name=test_config
+
+ID                          NAME                        CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_config                 About an hour ago   About an hour ago
+
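
Filters can be combined with the --quiet flag to produce bare IDs for scripting; a hedged sketch (the label value is assumed for illustration):

$ docker config ls --filter label=env=dev --quiet

The resulting IDs can then be passed to other commands, such as docker config rm.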

Format the output

The formatting option (--format) pretty-prints config output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Config ID
.Name Config name
.CreatedAt Time when the config was created
.UpdatedAt Time when the config was updated
.Labels All labels assigned to the config
.Label Value of a specific label for this config. For example {{.Label "my-label"}}

When using the --format option, the config ls command will either output the data exactly as the template declares or, when using the table directive, will include column headers as well.

The following example uses a template without headers and outputs the ID and Name entries separated by a colon (:) for all configs:

$ docker config ls --format "{{.ID}}: {{.Name}}"
+
+77af4d6b9913: config-1
+b6fa739cedf5: config-2
+78a85c484f71: config-3
+

To list all configs with their name and created date in a table format you can use:

$ docker config ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}"
+
+ID                  NAME                      CREATED
+77af4d6b9913        config-1                  5 minutes ago
+b6fa739cedf5        config-2                  3 hours ago
+78a85c484f71        config-3                  10 days ago
+

Parent command

Command Description
docker config Manage Docker configs
Command Description
docker config create Create a config from a file or STDIN
docker config inspect Display detailed information on one or more configs
docker config ls List configs
docker config rm Remove one or more configs
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/config_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_rm%2Findex.html new file mode 100644 index 00000000..60c91340 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fconfig_rm%2Findex.html @@ -0,0 +1,9 @@ +

docker config rm


Remove one or more configs

Swarm This command works with the Swarm orchestrator.

Usage

$ docker config rm CONFIG [CONFIG...]
+

Description

Removes the specified configs from the swarm.

For detailed information about using configs, refer to store configuration data using Docker Configs.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Examples

This example removes a config:

$ docker config rm my_config
+sapth4csdo5b6wz2p5uimh5xg
+

Warning

Unlike docker rm, this command does not ask for confirmation before removing a config.
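
More than one config can be removed in a single invocation; a sketch with hypothetical names:

$ docker config rm my_config test_config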

Parent command

Command Description
docker config Manage Docker configs
Command Description
docker config create Create a config from a file or STDIN
docker config inspect Display detailed information on one or more configs
docker config ls List configs
docker config rm Remove one or more configs
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/config_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer%2Findex.html new file mode 100644 index 00000000..ef7a2b36 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer%2Findex.html @@ -0,0 +1,7 @@ +

docker container


Manage containers

Usage

$ docker container COMMAND
+

Description

Manage containers.

Child commands

Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_attach%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_attach%2Findex.html new file mode 100644 index 00000000..bbf57d97 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_attach%2Findex.html @@ -0,0 +1,7 @@ +

docker container attach


Attach local standard input, output, and error streams to a running container

Usage

$ docker container attach [OPTIONS] CONTAINER
+

Options

Name, shorthand Default Description
--detach-keys Override the key sequence for detaching a container
--no-stdin Do not attach STDIN
--sig-proxy true Proxy all received signals to the process
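
A hedged sketch (the container name is hypothetical) combining the options above, overriding the detach key sequence and disabling signal proxying:

$ docker container attach --detach-keys="ctrl-x" --sig-proxy=false web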

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_attach/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_commit%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_commit%2Findex.html new file mode 100644 index 00000000..82e2df42 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_commit%2Findex.html @@ -0,0 +1,15 @@ +

docker container commit


Create a new image from a container’s changes

Usage

$ docker container commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
+

Options

Name, shorthand Default Description
--author , -a Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
--change , -c Apply Dockerfile instruction to the created image
--message , -m Commit message
--pause , -p true Pause container during commit

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_commit/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_cp%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_cp%2Findex.html new file mode 100644 index 00000000..739bd0f1 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_cp%2Findex.html @@ -0,0 +1,12 @@ +

docker container cp


Copy files/folders between a container and the local filesystem

Usage

$ docker container cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|-
+docker container cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Copy files/folders between a container and the local filesystem

Use '-' as the source to read a tar archive from stdin and extract it to a directory destination in a container. Use '-' as the destination to stream a tar archive of a container source to stdout.
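
As a sketch of the '-' forms described above (the container name and paths are hypothetical), the first command streams a container path to stdout as a tar archive, and the second extracts a local tar archive into a directory in the container:

$ docker container cp web:/etc/nginx - > nginx-conf.tar
$ tar -cf - ./site | docker container cp - web:/usr/share/nginx/html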

Options

Name, shorthand Default Description
--archive , -a Archive mode (copy all uid/gid information)
--follow-link , -L Always follow symbol link in SRC_PATH

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_cp/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_create%2Findex.html new file mode 100644 index 00000000..6bdf9147 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_create%2Findex.html @@ -0,0 +1,35 @@ +

docker container create


Create a new container

Usage

$ docker container create [OPTIONS] IMAGE [COMMAND] [ARG...]
+

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
--attach , -a Attach to STDIN, STDOUT or STDERR
--blkio-weight Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)
--blkio-weight-device Block IO weight (relative device weight)
--cap-add Add Linux capabilities
--cap-drop Drop Linux capabilities
--cgroup-parent Optional parent cgroup for the container
--cgroupns API 1.41+ Cgroup namespace to use (host|private) 'host': Run the container in the Docker host's cgroup namespace 'private': Run the container in its own private cgroup namespace '': Use the cgroup namespace as configured by the default-cgroupns-mode option on the daemon (default)
--cidfile Write the container ID to the file
--cpu-count CPU count (Windows only)
--cpu-percent CPU percent (Windows only)
--cpu-period Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period Limit CPU real-time period in microseconds
--cpu-rt-runtime Limit CPU real-time runtime in microseconds
--cpu-shares , -c CPU shares (relative weight)
--cpus Number of CPUs
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--device Add a host device to the container
--device-cgroup-rule Add a rule to the cgroup allowed devices list
--device-read-bps Limit read rate (bytes per second) from a device
--device-read-iops Limit read rate (IO per second) from a device
--device-write-bps Limit write rate (bytes per second) to a device
--device-write-iops Limit write rate (IO per second) to a device
--disable-content-trust true Skip image verification
--dns Set custom DNS servers
--dns-opt Set DNS options
--dns-option Set DNS options
--dns-search Set custom DNS search domains
--domainname Container NIS domain name
--entrypoint Overwrite the default ENTRYPOINT of the image
--env , -e Set environment variables
--env-file Read in a file of environment variables
--expose Expose a port or a range of ports
--gpus API 1.40+ GPU devices to add to the container ('all' to pass all GPUs)
--group-add Add additional groups to join
--health-cmd Command to run to check health
--health-interval Time between running the check (ms|s|m|h) (default 0s)
--health-retries Consecutive failures needed to report unhealthy
--health-start-period Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)
--health-timeout Maximum time to allow one check to run (ms|s|m|h) (default 0s)
--help Print usage
--hostname , -h Container host name
--init Run an init inside the container that forwards signals and reaps processes
--interactive , -i Keep STDIN open even if not attached
--io-maxbandwidth Maximum IO bandwidth limit for the system drive (Windows only)
--io-maxiops Maximum IOps limit for the system drive (Windows only)
--ip IPv4 address (e.g., 172.30.100.104)
--ip6 IPv6 address (e.g., 2001:db8::33)
--ipc IPC mode to use
--isolation Container isolation technology
--kernel-memory Kernel memory limit
--label , -l Set meta data on a container
--label-file Read in a line delimited file of labels
--link Add link to another container
--link-local-ip Container IPv4/IPv6 link-local addresses
--log-driver Logging driver for the container
--log-opt Log driver options
--mac-address Container MAC address (e.g., 92:d0:c6:0a:29:33)
--memory , -m Memory limit
--memory-reservation Memory soft limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness -1 Tune container memory swappiness (0 to 100)
--mount Attach a filesystem mount to the container
--name Assign a name to the container
--net Connect a container to a network
--net-alias Add network-scoped alias for the container
--network Connect a container to a network
--network-alias Add network-scoped alias for the container
--no-healthcheck Disable any container-specified HEALTHCHECK
--oom-kill-disable Disable OOM Killer
--oom-score-adj Tune host's OOM preferences (-1000 to 1000)
--pid PID namespace to use
--pids-limit Tune container pids limit (set -1 for unlimited)
--platform Set platform if server is multi-platform capable
--privileged Give extended privileges to this container
--publish , -p Publish a container's port(s) to the host
--publish-all , -P Publish all exposed ports to random ports
--pull missing Pull image before creating ("always"|"missing"|"never")
--read-only Mount the container's root filesystem as read only
--restart no Restart policy to apply when a container exits
--rm Automatically remove the container when it exits
--runtime Runtime to use for this container
--security-opt Security Options
--shm-size Size of /dev/shm
--stop-signal SIGTERM Signal to stop a container
--stop-timeout Timeout (in seconds) to stop a container
--storage-opt Storage driver options for the container
--sysctl Sysctl options
--tmpfs Mount a tmpfs directory
--tty , -t Allocate a pseudo-TTY
--ulimit Ulimit options
--user , -u Username or UID (format: <name|uid>[:<group|gid>])
--userns User namespace to use
--uts UTS namespace to use
--volume , -v Bind mount a volume
--volume-driver Optional volume driver for the container
--volumes-from Mount volumes from the specified container(s)
--workdir , -w Working directory inside the container
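
A minimal hedged sketch (the name, port mapping, and image are chosen only for illustration): create a container with a published port, then start it separately:

$ docker container create --name web -p 8080:80 nginx:alpine
$ docker container start web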

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_diff%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_diff%2Findex.html new file mode 100644 index 00000000..75d33f79 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_diff%2Findex.html @@ -0,0 +1,7 @@ +

docker container diff


Inspect changes to files or directories on a container’s filesystem

Usage

$ docker container diff CONTAINER
+

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_diff/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_exec%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_exec%2Findex.html new file mode 100644 index 00000000..9a581e32 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_exec%2Findex.html @@ -0,0 +1,19 @@ +

docker container exec


Run a command in a running container

Usage

$ docker container exec [OPTIONS] CONTAINER COMMAND [ARG...]
+

Options

Name, shorthand Default Description
--detach , -d Detached mode: run command in the background
--detach-keys Override the key sequence for detaching a container
--env , -e Set environment variables
--env-file Read in a file of environment variables
--interactive , -i Keep STDIN open even if not attached
--privileged Give extended privileges to the command
--tty , -t Allocate a pseudo-TTY
--user , -u Username or UID (format: <name|uid>[:<group|gid>])
--workdir , -w Working directory inside the container
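
A hedged example (the container name, working directory, and environment variable are hypothetical) combining the interactive, TTY, working-directory, and environment options above:

$ docker container exec -it -w /app -e DEBUG=1 web sh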

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_exec/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_export%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_export%2Findex.html new file mode 100644 index 00000000..df3e5bbc --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_export%2Findex.html @@ -0,0 +1,9 @@ +

docker container export


Export a container’s filesystem as a tar archive

Usage

$ docker container export [OPTIONS] CONTAINER
+

Options

Name, shorthand Default Description
--output , -o Write to a file, instead of STDOUT
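
A sketch (the container and file names are hypothetical) writing the container's filesystem to a tar file instead of STDOUT:

$ docker container export --output web-rootfs.tar web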

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_export/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_inspect%2Findex.html new file mode 100644 index 00000000..d1ad0cc7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_inspect%2Findex.html @@ -0,0 +1,11 @@ +

docker container inspect


Display detailed information on one or more containers

Usage

$ docker container inspect [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template
--size , -s Display total file sizes
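
A hedged sketch (the container name is hypothetical) using --format to extract a single field from the inspect output:

$ docker container inspect --format '{{.State.Status}}' web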

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_kill%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_kill%2Findex.html new file mode 100644 index 00000000..60c99770 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_kill%2Findex.html @@ -0,0 +1,9 @@ +

docker container kill


Kill one or more running containers

Usage

$ docker container kill [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--signal , -s KILL Signal to send to the container
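
A sketch (the container name is hypothetical) sending a signal other than the default KILL:

$ docker container kill --signal=SIGHUP web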

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_kill/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_logs%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_logs%2Findex.html new file mode 100644 index 00000000..cc5fb665 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_logs%2Findex.html @@ -0,0 +1,13 @@ +

docker container logs


Fetch the logs of a container

Usage

$ docker container logs [OPTIONS] CONTAINER
+

Options

Name, shorthand Default Description
--details Show extra details provided to logs
--follow , -f Follow log output
--since Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
--tail , -n all Number of lines to show from the end of the logs
--timestamps , -t Show timestamps
--until Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
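
A hedged sketch (the container name is hypothetical) combining several of the options above to show the last 50 timestamped lines from the past 30 minutes:

$ docker container logs --since 30m --tail 50 --timestamps web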

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_logs/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_ls%2Findex.html new file mode 100644 index 00000000..c0e42bc1 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_ls%2Findex.html @@ -0,0 +1,19 @@ +

docker container ls


List containers

Usage

$ docker container ls [OPTIONS]
+

Options

Name, shorthand Default Description
--all , -a Show all containers (default shows just running)
--filter , -f Filter output based on conditions provided
--format Pretty-print containers using a Go template
--last , -n -1 Show n last created containers (includes all states)
--latest , -l Show the latest created container (includes all states)
--no-trunc Don't truncate output
--quiet , -q Only display container IDs
--size , -s Display total file sizes
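
A hedged sketch combining the options above to print only the IDs of all exited containers (the status filter value is assumed here for illustration):

$ docker container ls --all --filter status=exited --quiet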

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_pause%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_pause%2Findex.html new file mode 100644 index 00000000..3099447b --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_pause%2Findex.html @@ -0,0 +1,7 @@ +

docker container pause


Pause all processes within one or more containers

Usage

$ docker container pause CONTAINER [CONTAINER...]
+

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_pause/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_port%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_port%2Findex.html new file mode 100644 index 00000000..45f92566 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_port%2Findex.html @@ -0,0 +1,7 @@ +

docker container port


List port mappings or a specific mapping for the container

Usage

$ docker container port CONTAINER [PRIVATE_PORT[/PROTO]]
+
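
Examples

A sketch of both forms, assuming a hypothetical container named web that publishes container port 80 on host port 8080:

$ docker container port web
80/tcp -> 0.0.0.0:8080

$ docker container port web 80/tcp
0.0.0.0:8080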

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_port/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_prune%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_prune%2Findex.html new file mode 100644 index 00000000..0e57681a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_prune%2Findex.html @@ -0,0 +1,51 @@ +

docker container prune


Remove all stopped containers

Usage

$ docker container prune [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Removes all stopped containers.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter Provide filter values (e.g. 'until=<timestamp>')
--force , -f Do not prompt for confirmation

Examples

Prune containers

$ docker container prune
+WARNING! This will remove all stopped containers.
+Are you sure you want to continue? [y/N] y
+Deleted Containers:
+4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063
+f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360
+
+Total reclaimed space: 212 B
+

Filtering

The filtering flag (--filter) accepts filters in a "key=value" format. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are until and label:

The until filter can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative to the daemon machine's time. Supported formats for date formatted timestamps include RFC3339Nano, RFC3339, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the daemon will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long.

The label filter accepts two formats. One is label=... (label=<key> or label=<key>=<value>), which removes containers with the specified labels. The other is label!=... (label!=<key> or label!=<key>=<value>), which removes containers without the specified labels.
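
For instance, the label filter can be combined with --force in either form (the label names here are illustrative):

$ docker container prune --force --filter "label=env=test"

$ docker container prune --force --filter "label!=keep"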

The following removes containers created more than 5 minutes ago:

$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
+
+CONTAINER ID        IMAGE               COMMAND             CREATED AT                      STATUS
+61b9efa71024        busybox             "sh"                2017-01-04 13:23:33 -0800 PST   Exited (0) 41 seconds ago
+53a9bc23a516        busybox             "sh"                2017-01-04 13:11:59 -0800 PST   Exited (0) 12 minutes ago
+
+$ docker container prune --force --filter "until=5m"
+
+Deleted Containers:
+53a9bc23a5168b6caa2bfbefddf1b30f93c7ad57f3dec271fd32707497cb9369
+
+Total reclaimed space: 25 B
+
+$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
+
+CONTAINER ID        IMAGE               COMMAND             CREATED AT                      STATUS
+61b9efa71024        busybox             "sh"                2017-01-04 13:23:33 -0800 PST   Exited (0) 44 seconds ago
+

The following removes containers created before 2017-01-04T13:10:00:

$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
+
+CONTAINER ID        IMAGE               COMMAND             CREATED AT                      STATUS
+53a9bc23a516        busybox             "sh"                2017-01-04 13:11:59 -0800 PST   Exited (0) 7 minutes ago
+4a75091a6d61        busybox             "sh"                2017-01-04 13:09:53 -0800 PST   Exited (0) 9 minutes ago
+
+$ docker container prune --force --filter "until=2017-01-04T13:10:00"
+
+Deleted Containers:
+4a75091a6d618526fcd8b33ccd6e5928ca2a64415466f768a6180004b0c72c6c
+
+Total reclaimed space: 27 B
+
+$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
+
+CONTAINER ID        IMAGE               COMMAND             CREATED AT                      STATUS
+53a9bc23a516        busybox             "sh"                2017-01-04 13:11:59 -0800 PST   Exited (0) 9 minutes ago
+

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_prune/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rename%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rename%2Findex.html new file mode 100644 index 00000000..84d8b01a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rename%2Findex.html @@ -0,0 +1,7 @@ +

docker container rename


Rename a container

Usage

$ docker container rename CONTAINER NEW_NAME
+
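
Examples

A minimal sketch with placeholder names:

$ docker container rename old-name new-name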

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_rename/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_restart%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_restart%2Findex.html new file mode 100644 index 00000000..b70812f4 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_restart%2Findex.html @@ -0,0 +1,9 @@ +

docker container restart


Restart one or more containers

Usage

$ docker container restart [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--time , -t 10 Seconds to wait for stop before killing the container
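
Examples

For instance, to give containers five seconds to stop gracefully before they are killed and restarted (container names are placeholders):

$ docker container restart --time 5 web db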

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_restart/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rm%2Findex.html new file mode 100644 index 00000000..8ddcf19d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_rm%2Findex.html @@ -0,0 +1,13 @@ +

docker container rm


Remove one or more containers

Usage

$ docker container rm [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--force , -f Force the removal of a running container (uses SIGKILL)
--link , -l Remove the specified link
--volumes , -v Remove anonymous volumes associated with the container
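
Examples

A sketch combining the options above on a hypothetical container: force-remove a running container and delete its anonymous volumes:

$ docker container rm --force --volumes web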

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_run%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_run%2Findex.html new file mode 100644 index 00000000..c91a04cb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_run%2Findex.html @@ -0,0 +1,37 @@ +

docker container run


Run a command in a new container

Usage

$ docker container run [OPTIONS] IMAGE [COMMAND] [ARG...]
+

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
--attach , -a Attach to STDIN, STDOUT or STDERR
--blkio-weight Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)
--blkio-weight-device Block IO weight (relative device weight)
--cap-add Add Linux capabilities
--cap-drop Drop Linux capabilities
--cgroup-parent Optional parent cgroup for the container
--cgroupns API 1.41+ Cgroup namespace to use (host|private) 'host': Run the container in the Docker host's cgroup namespace 'private': Run the container in its own private cgroup namespace '': Use the cgroup namespace as configured by the default-cgroupns-mode option on the daemon (default)
--cidfile Write the container ID to the file
--cpu-count CPU count (Windows only)
--cpu-percent CPU percent (Windows only)
--cpu-period Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period Limit CPU real-time period in microseconds
--cpu-rt-runtime Limit CPU real-time runtime in microseconds
--cpu-shares , -c CPU shares (relative weight)
--cpus Number of CPUs
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--detach , -d Run container in background and print container ID
--detach-keys Override the key sequence for detaching a container
--device Add a host device to the container
--device-cgroup-rule Add a rule to the cgroup allowed devices list
--device-read-bps Limit read rate (bytes per second) from a device
--device-read-iops Limit read rate (IO per second) from a device
--device-write-bps Limit write rate (bytes per second) to a device
--device-write-iops Limit write rate (IO per second) to a device
--disable-content-trust true Skip image verification
--dns Set custom DNS servers
--dns-opt Set DNS options
--dns-option Set DNS options
--dns-search Set custom DNS search domains
--domainname Container NIS domain name
--entrypoint Overwrite the default ENTRYPOINT of the image
--env , -e Set environment variables
--env-file Read in a file of environment variables
--expose Expose a port or a range of ports
--gpus API 1.40+ GPU devices to add to the container ('all' to pass all GPUs)
--group-add Add additional groups to join
--health-cmd Command to run to check health
--health-interval Time between running the check (ms|s|m|h) (default 0s)
--health-retries Consecutive failures needed to report unhealthy
--health-start-period Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)
--health-timeout Maximum time to allow one check to run (ms|s|m|h) (default 0s)
--help Print usage
--hostname , -h Container host name
--init Run an init inside the container that forwards signals and reaps processes
--interactive , -i Keep STDIN open even if not attached
--io-maxbandwidth Maximum IO bandwidth limit for the system drive (Windows only)
--io-maxiops Maximum IOps limit for the system drive (Windows only)
--ip IPv4 address (e.g., 172.30.100.104)
--ip6 IPv6 address (e.g., 2001:db8::33)
--ipc IPC mode to use
--isolation Container isolation technology
--kernel-memory Kernel memory limit
--label , -l Set meta data on a container
--label-file Read in a line delimited file of labels
--link Add link to another container
--link-local-ip Container IPv4/IPv6 link-local addresses
--log-driver Logging driver for the container
--log-opt Log driver options
--mac-address Container MAC address (e.g., 92:d0:c6:0a:29:33)
--memory , -m Memory limit
--memory-reservation Memory soft limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness -1 Tune container memory swappiness (0 to 100)
--mount Attach a filesystem mount to the container
--name Assign a name to the container
--net Connect a container to a network
--net-alias Add network-scoped alias for the container
--network Connect a container to a network
--network-alias Add network-scoped alias for the container
--no-healthcheck Disable any container-specified HEALTHCHECK
--oom-kill-disable Disable OOM Killer
--oom-score-adj Tune host's OOM preferences (-1000 to 1000)
--pid PID namespace to use
--pids-limit Tune container pids limit (set -1 for unlimited)
--platform Set platform if server is multi-platform capable
--privileged Give extended privileges to this container
--publish , -p Publish a container's port(s) to the host
--publish-all , -P Publish all exposed ports to random ports
--pull missing Pull image before running ("always"|"missing"|"never")
--read-only Mount the container's root filesystem as read only
--restart no Restart policy to apply when a container exits
--rm Automatically remove the container when it exits
--runtime Runtime to use for this container
--security-opt Security Options
--shm-size Size of /dev/shm
--sig-proxy true Proxy received signals to the process
--stop-signal SIGTERM Signal to stop a container
--stop-timeout Timeout (in seconds) to stop a container
--storage-opt Storage driver options for the container
--sysctl Sysctl options
--tmpfs Mount a tmpfs directory
--tty , -t Allocate a pseudo-TTY
--ulimit Ulimit options
--user , -u Username or UID (format: <name|uid>[:<group|gid>])
--userns User namespace to use
--uts UTS namespace to use
--volume , -v Bind mount a volume
--volume-driver Optional volume driver for the container
--volumes-from Mount volumes from the specified container(s)
--workdir , -w Working directory inside the container
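
Examples

These options are commonly combined. The following illustrative command (image and names are examples) runs an nginx container in the background, names it, publishes container port 80 on host port 8080, caps its memory, and removes the container when it exits:

$ docker container run --detach --name web --publish 8080:80 --memory 512m --rm nginx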

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_run/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_start%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_start%2Findex.html new file mode 100644 index 00000000..aba39c89 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_start%2Findex.html @@ -0,0 +1,13 @@ +

docker container start


Start one or more stopped containers

Usage

$ docker container start [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--attach , -a Attach STDOUT/STDERR and forward signals
--checkpoint experimental (daemon) Restore from this checkpoint
--checkpoint-dir experimental (daemon) Use a custom checkpoint storage directory
--detach-keys Override the key sequence for detaching a container
--interactive , -i Attach container's STDIN
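
Examples

For instance, to start a stopped container, attach to its output, and forward your STDIN (the container name is a placeholder):

$ docker container start --attach --interactive my-container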

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_start/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stats%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stats%2Findex.html new file mode 100644 index 00000000..e8017056 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stats%2Findex.html @@ -0,0 +1,9 @@ +

docker container stats


Display a live stream of container(s) resource usage statistics

Usage

$ docker container stats [OPTIONS] [CONTAINER...]
+

Options

Name, shorthand Default Description
--all , -a Show all containers (default shows just running)
--format Pretty-print stats using a Go template
--no-stream Disable streaming stats and only pull the first result
--no-trunc Do not truncate output
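
Examples

A sketch that prints a single snapshot instead of a live stream, formatted with the standard stats template placeholders:

$ docker container stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}"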

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_stats/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stop%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stop%2Findex.html new file mode 100644 index 00000000..6227b9cf --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_stop%2Findex.html @@ -0,0 +1,9 @@ +

docker container stop


Stop one or more running containers

Usage

$ docker container stop [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--time , -t 10 Seconds to wait for stop before killing it
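
Examples

For instance, to allow 30 seconds for a graceful shutdown before the container is killed (the name is a placeholder):

$ docker container stop --time 30 web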

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_stop/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_top%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_top%2Findex.html new file mode 100644 index 00000000..29d08950 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_top%2Findex.html @@ -0,0 +1,7 @@ +

docker container top


Display the running processes of a container

Usage

$ docker container top CONTAINER [ps OPTIONS]
+
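
Examples

Any trailing arguments are passed on to ps; a minimal sketch with a placeholder container name:

$ docker container top my-container aux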

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_top/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_unpause%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_unpause%2Findex.html new file mode 100644 index 00000000..50316572 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_unpause%2Findex.html @@ -0,0 +1,7 @@ +

docker container unpause


Unpause all processes within one or more containers

Usage

$ docker container unpause CONTAINER [CONTAINER...]
+

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_unpause/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_update%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_update%2Findex.html new file mode 100644 index 00000000..1ffebd5f --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_update%2Findex.html @@ -0,0 +1,12 @@ +

docker container update


Update configuration of one or more containers

Usage

$ docker container update [OPTIONS] CONTAINER [CONTAINER...]
+

Options

Name, shorthand Default Description
--blkio-weight Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)
--cpu-period Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period Limit the CPU real-time period in microseconds
--cpu-rt-runtime Limit the CPU real-time runtime in microseconds
--cpu-shares , -c CPU shares (relative weight)
--cpus Number of CPUs
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--kernel-memory Kernel memory limit
--memory , -m Memory limit
--memory-reservation Memory soft limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--pids-limit API 1.40+ Tune container pids limit (set -1 for unlimited)
--restart Restart policy to apply when a container exits
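
Examples

An illustrative update that raises the CPU and memory limits of a running container (name and values are placeholders; if the container already has a swap limit, --memory-swap generally has to be raised along with --memory so it stays greater than or equal to it):

$ docker container update --cpus 1.5 --memory 512m --memory-swap 1g web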

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_update/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_wait%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_wait%2Findex.html new file mode 100644 index 00000000..7f7242bd --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontainer_wait%2Findex.html @@ -0,0 +1,7 @@ +

docker container wait


Block until one or more containers stop, then print their exit codes

Usage

$ docker container wait CONTAINER [CONTAINER...]
+
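
Examples

A sketch with a placeholder name; the command blocks until the container exits and then prints its exit code:

$ docker container wait my-container
0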

Parent command

Command Description
docker container Manage containers
Command Description
docker container attach Attach local standard input, output, and error streams to a running container
docker container commit Create a new image from a container’s changes
docker container cp Copy files/folders between a container and the local filesystem
docker container create Create a new container
docker container diff Inspect changes to files or directories on a container’s filesystem
docker container exec Run a command in a running container
docker container export Export a container’s filesystem as a tar archive
docker container inspect Display detailed information on one or more containers
docker container kill Kill one or more running containers
docker container logs Fetch the logs of a container
docker container ls List containers
docker container pause Pause all processes within one or more containers
docker container port List port mappings or a specific mapping for the container
docker container prune Remove all stopped containers
docker container rename Rename a container
docker container restart Restart one or more containers
docker container rm Remove one or more containers
docker container run Run a command in a new container
docker container start Start one or more stopped containers
docker container stats Display a live stream of container(s) resource usage statistics
docker container stop Stop one or more running containers
docker container top Display the running processes of a container
docker container unpause Unpause all processes within one or more containers
docker container update Update configuration of one or more containers
docker container wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/container_wait/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext%2Findex.html new file mode 100644 index 00000000..4da064fd --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext%2Findex.html @@ -0,0 +1,7 @@ +

docker context


Manage contexts

Usage

$ docker context COMMAND
+

Description

Manage contexts.

Child commands

Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+
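
For instance, a typical workflow creates a context for a remote engine and then switches to it (the host and context name are placeholders):

$ docker context create --docker host=ssh://user@example.com remote-box
$ docker context use remote-box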

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_create%2Findex.html new file mode 100644 index 00000000..8fa4d0bc --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_create%2Findex.html @@ -0,0 +1,25 @@ +

docker context create


Create a context

Usage

$ docker context create [OPTIONS] CONTEXT
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a new context. This allows you to quickly switch the CLI configuration to connect to different clusters or single nodes.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--default-stack-orchestrator deprecated Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)
--description Description of the context
--docker Set the docker endpoint
--from Create context from a named context
--kubernetes deprecated (Kubernetes) Set the kubernetes endpoint

Examples

Create a context with a docker and kubernetes endpoint

To create a context from scratch provide the docker and, if required, kubernetes options. The example below creates the context my-context with a docker endpoint of /var/run/docker.sock and a kubernetes configuration sourced from the file /home/me/my-kube-config:

$ docker context create \
+    --docker host=unix:///var/run/docker.sock \
+    --kubernetes config-file=/home/me/my-kube-config \
+    my-context
+

Create a context based on an existing context

Use the --from=<context-name> option to create a new context from an existing context. The example below creates a new context named my-context from the existing context existing-context:

$ docker context create --from existing-context my-context
+

If the --from option is not set, the context is created from the current context:

$ docker context create my-context
+

This can be used to create a context out of an existing DOCKER_HOST based script:

$ source my-setup-script.sh
+$ docker context create my-context
+

To source only the docker endpoint configuration from an existing context use the --docker from=<context-name> option. The example below creates a new context named my-context using the docker endpoint configuration from the existing context existing-context and a kubernetes configuration sourced from the file /home/me/my-kube-config:

$ docker context create \
+    --docker from=existing-context \
+    --kubernetes config-file=/home/me/my-kube-config \
+    my-context
+

To source only the kubernetes configuration from an existing context use the --kubernetes from=<context-name> option. The example below creates a new context named my-context using the kubernetes configuration from the existing context existing-context and a docker endpoint of /var/run/docker.sock:

$ docker context create \
+    --docker host=unix:///var/run/docker.sock \
+    --kubernetes from=existing-context \
+    my-context
+

Docker and Kubernetes endpoints configurations, as well as default stack orchestrator and description can be modified with docker context update.

Refer to the docker context update reference for details.

Parent command

Command Description
docker context Manage contexts
Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_export%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_export%2Findex.html new file mode 100644 index 00000000..06418070 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_export%2Findex.html @@ -0,0 +1,8 @@ +

docker context export


Export a context to a tar or kubeconfig file

Usage

$ docker context export [OPTIONS] CONTEXT [FILE|-]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Exports a context in a file that can then be used with docker context import (or with kubectl if --kubeconfig is set). Default output filename is <CONTEXT>.dockercontext, or <CONTEXT>.kubeconfig if --kubeconfig is set. To export to STDOUT, you can run docker context export my-context -.
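
For instance (the context name is a placeholder), the first command writes my-context.dockercontext to the current directory, while the second streams the context to STDOUT:

$ docker context export my-context

$ docker context export my-context -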

Options

Name, shorthand Default Description
--kubeconfig deprecated (Kubernetes) Export as a kubeconfig file

Parent command

Command Description
docker context Manage contexts
Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_export/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_import%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_import%2Findex.html new file mode 100644 index 00000000..c01e59cd --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_import%2Findex.html @@ -0,0 +1,7 @@ +

docker context import


Import a context from a tar or zip file

Usage

$ docker context import CONTEXT FILE|-
+

Description

Imports a context previously exported with docker context export. To import from stdin, use a hyphen (-) as filename.
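
For instance, to re-create a context from a previously exported file, or from STDIN (names are placeholders):

$ docker context import my-context ./my-context.dockercontext

$ cat my-context.dockercontext | docker context import my-context -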

Parent command

Command Description
docker context Manage contexts
Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_import/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_inspect%2Findex.html new file mode 100644 index 00000000..c11ea1a6 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_inspect%2Findex.html @@ -0,0 +1,42 @@ +

docker context inspect


Display detailed information on one or more contexts

Usage

$ docker context inspect [OPTIONS] [CONTEXT] [CONTEXT...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Inspects one or more contexts.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template

Examples

Inspect a context by name

$ docker context inspect "local+aks"
+
+[
+  {
+    "Name": "local+aks",
+    "Metadata": {
+      "Description": "Local Docker Engine + Azure AKS endpoint",
+      "StackOrchestrator": "kubernetes"
+    },
+    "Endpoints": {
+      "docker": {
+        "Host": "npipe:////./pipe/docker_engine",
+        "SkipTLSVerify": false
+      },
+      "kubernetes": {
+        "Host": "https://simon-aks-***.hcp.uksouth.azmk8s.io:443",
+        "SkipTLSVerify": false,
+        "DefaultNamespace": "default"
+      }
+    },
+    "TLSMaterial": {
+      "kubernetes": [
+        "ca.pem",
+        "cert.pem",
+        "key.pem"
+      ]
+    },
+    "Storage": {
+      "MetadataPath": "C:\\Users\\simon\\.docker\\contexts\\meta\\cb6d08c0a1bfa5fe6f012e61a442788c00bed93f509141daff05f620fc54ddee",
+      "TLSPath": "C:\\Users\\simon\\.docker\\contexts\\tls\\cb6d08c0a1bfa5fe6f012e61a442788c00bed93f509141daff05f620fc54ddee"
+    }
+  }
+]
+

Parent command

Command Description
docker context Manage contexts
Related commands

Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_ls%2Findex.html new file mode 100644 index 00000000..636c24b0 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_ls%2Findex.html @@ -0,0 +1,15 @@ +

docker context ls


List contexts

Usage

$ docker context ls [OPTIONS]
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format Pretty-print contexts using a Go template
--quiet , -q Only show context names

Examples

Use docker context ls to print all contexts. The currently active context is indicated with an *:

$ docker context ls
+
+NAME                DESCRIPTION                               DOCKER ENDPOINT                      KUBERNETES ENDPOINT   ORCHESTRATOR
+default *           Current DOCKER_HOST based configuration   unix:///var/run/docker.sock                                swarm
+production                                                    tcp:///prod.corp.example.com:2376
+staging                                                       tcp:///stage.corp.example.com:2376
+
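
Use the --quiet flag to print only the context names; a minimal sketch based on the contexts listed above:

$ docker context ls -q
default
production
staging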

Parent command

Command Description
docker context Manage contexts
Related commands

Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_rm%2Findex.html new file mode 100644 index 00000000..8b45c22d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_rm%2Findex.html @@ -0,0 +1,9 @@ +

docker context rm


Remove one or more contexts

Usage

$ docker context rm CONTEXT [CONTEXT...]
+

Options

Name, shorthand Default Description
--force , -f Force the removal of a context in use
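
Examples

A minimal sketch (the context names are hypothetical): remove a context, and force-remove a context that is currently in use:

$ docker context rm my-context

$ docker context rm --force my-other-context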

Parent command

Command Description
docker context Manage contexts
Related commands

Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_update%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_update%2Findex.html new file mode 100644 index 00000000..f6088802 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_update%2Findex.html @@ -0,0 +1,13 @@ +

docker context update


Update a context

Usage

$ docker context update [OPTIONS] CONTEXT
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Updates an existing context. See context create.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--default-stack-orchestrator deprecated Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)
--description Description of the context
--docker Set the docker endpoint
--kubernetes deprecated, Kubernetes Set the kubernetes endpoint

Examples

Update an existing context

$ docker context update \
+    --description "some description" \
+    --docker "host=tcp://myserver:2376,ca=~/ca-file,cert=~/cert-file,key=~/key-file" \
+    my-context
+

Parent command

Command Description
docker context Manage contexts
Related commands

Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_update/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_use%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_use%2Findex.html new file mode 100644 index 00000000..53fbba5a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcontext_use%2Findex.html @@ -0,0 +1,7 @@ +

docker context use


Set the current docker context

Usage

$ docker context use CONTEXT
+

Description

Set the default context to use, when DOCKER_HOST, DOCKER_CONTEXT environment variables and --host, --context global options are not set. To disable usage of contexts, you can use the special default context.
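
For example, a minimal sketch (the context name my-context is hypothetical): switch to a named context, then back to the special default context:

$ docker context use my-context

$ docker context use default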

Parent command

Command Description
docker context Manage contexts
Related commands

Command Description
docker context create Create a context
docker context export Export a context to a tar or kubeconfig file
docker context import Import a context from a tar or zip file
docker context inspect Display detailed information on one or more contexts
docker context ls List contexts
docker context rm Remove one or more contexts
docker context update Update a context
docker context use Set the current docker context
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/context_use/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcp%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcp%2Findex.html new file mode 100644 index 00000000..a23ca6bb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcp%2Findex.html @@ -0,0 +1,32 @@ +

docker cp


Copy files/folders between a container and the local filesystem

Usage

$ docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|-
+docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker cp utility copies the contents of SRC_PATH to the DEST_PATH. You can copy from the container’s file system to the local machine or the reverse, from the local filesystem to the container. If - is specified for either the SRC_PATH or DEST_PATH, you can also stream a tar archive from STDIN or to STDOUT. The CONTAINER can be a running or stopped container. The SRC_PATH or DEST_PATH can be a file or directory.

The docker cp command assumes container paths are relative to the container’s / (root) directory. This means supplying the initial forward slash is optional; the command sees compassionate_darwin:/tmp/foo/myfile.txt and compassionate_darwin:tmp/foo/myfile.txt as identical. Local machine paths can be absolute or relative. The command interprets a local machine’s relative paths as relative to the current working directory where docker cp is run.

The cp command behaves like the Unix cp -a command in that directories are copied recursively with permissions preserved if possible. Ownership is set to the user and primary group at the destination. For example, files copied to a container are created with UID:GID of the root user. Files copied to the local machine are created with the UID:GID of the user which invoked the docker cp command. However, if you specify the -a option, docker cp sets the ownership to the user and primary group at the source. If you specify the -L option, docker cp follows any symbolic link in the SRC_PATH. docker cp does not create parent directories for DEST_PATH if they do not exist.

Assuming a path separator of /, a first argument of SRC_PATH and second argument of DEST_PATH, the behavior depends on whether each path refers to a file or a directory and on whether DEST_PATH already exists.

The command requires SRC_PATH and DEST_PATH to exist according to the above rules. If SRC_PATH is local and is a symbolic link, the symbolic link, not the target, is copied by default. To copy the link target and not the link, specify the -L option.

A colon (:) is used as a delimiter between CONTAINER and its path. You can also use : when specifying paths to a SRC_PATH or DEST_PATH on a local machine, for example file:name.txt. If you use a : in a local machine path, you must be explicit with a relative or absolute path, for example:

`/path/to/file:name.txt` or `./file:name.txt`
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--archive , -a Archive mode (copy all uid/gid information)
--follow-link , -L Always follow symbol link in SRC_PATH

Examples

Copy a local file into container

$ docker cp ./some_file CONTAINER:/work
+

Copy files from container to local path

$ docker cp CONTAINER:/var/logs/ /tmp/app_logs
+

Copy a file from container to stdout. Please note cp command produces a tar stream

$ docker cp CONTAINER:/var/logs/app.log - | tar x -O | grep "ERROR"
+
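
Copy a directory preserving ownership, or follow a symlink

A hedged sketch (the paths are illustrative): -a keeps the source UID:GID at the destination, and -L copies the target of a symbolic link rather than the link itself:

$ docker cp -a CONTAINER:/srv/app ./app-backup

$ docker cp -L CONTAINER:/etc/localtime ./localtime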

Corner cases

It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. However, you can still copy such files by manually running tar in docker exec. Both of the following examples do the same thing in different ways (consider SRC_PATH and DEST_PATH are directories):

$ docker exec CONTAINER tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH -
+
$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i CONTAINER tar Cxf DEST_PATH -
+

Using - as the SRC_PATH streams the contents of STDIN as a tar archive. The command extracts the content of the tar to the DEST_PATH in container’s filesystem. In this case, DEST_PATH must specify a directory. Using - as the DEST_PATH streams the contents of the resource as a tar archive to STDOUT.
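
A hedged sketch of the first case (the paths are illustrative): stream a local directory as a tar archive into a directory inside the container:

$ tar -cf - ./site-config | docker cp - CONTAINER:/etc/nginx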

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/cp/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fcreate%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcreate%2Findex.html new file mode 100644 index 00000000..940850dd --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fcreate%2Findex.html @@ -0,0 +1,69 @@ +

docker create


Create a new container

Usage

$ docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker container create (or shorthand: docker create) command creates a new container from the specified image, without starting it.

When creating a container, the docker daemon creates a writeable container layer over the specified image and prepares it for running the specified command. The container ID is then printed to STDOUT. This is similar to docker run -d except the container is never started. You can then use the docker container start (or shorthand: docker start) command to start the container at any point.

This is useful when you want to set up a container configuration ahead of time so that it is ready to start when you need it. The initial status of the new container is created.

The docker create command shares most of its options with the docker run command (which performs a docker create before starting it). Refer to the docker run command section and the Docker run reference for details on the available flags and options.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
--attach , -a Attach to STDIN, STDOUT or STDERR
--blkio-weight Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)
--blkio-weight-device Block IO weight (relative device weight)
--cap-add Add Linux capabilities
--cap-drop Drop Linux capabilities
--cgroup-parent Optional parent cgroup for the container
--cgroupns API 1.41+
Cgroup namespace to use (host|private) 'host': Run the container in the Docker host's cgroup namespace 'private': Run the container in its own private cgroup namespace '': Use the cgroup namespace as configured by the default-cgroupns-mode option on the daemon (default)
--cidfile Write the container ID to the file
--cpu-count CPU count (Windows only)
--cpu-percent CPU percent (Windows only)
--cpu-period Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period Limit CPU real-time period in microseconds
--cpu-rt-runtime Limit CPU real-time runtime in microseconds
--cpu-shares , -c CPU shares (relative weight)
--cpus Number of CPUs
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--device Add a host device to the container
--device-cgroup-rule Add a rule to the cgroup allowed devices list
--device-read-bps Limit read rate (bytes per second) from a device
--device-read-iops Limit read rate (IO per second) from a device
--device-write-bps Limit write rate (bytes per second) to a device
--device-write-iops Limit write rate (IO per second) to a device
--disable-content-trust true Skip image verification
--dns Set custom DNS servers
--dns-opt Set DNS options
--dns-option Set DNS options
--dns-search Set custom DNS search domains
--domainname Container NIS domain name
--entrypoint Overwrite the default ENTRYPOINT of the image
--env , -e Set environment variables
--env-file Read in a file of environment variables
--expose Expose a port or a range of ports
--gpus API 1.40+
GPU devices to add to the container ('all' to pass all GPUs)
--group-add Add additional groups to join
--health-cmd Command to run to check health
--health-interval Time between running the check (ms|s|m|h) (default 0s)
--health-retries Consecutive failures needed to report unhealthy
--health-start-period Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)
--health-timeout Maximum time to allow one check to run (ms|s|m|h) (default 0s)
--help Print usage
--hostname , -h Container host name
--init Run an init inside the container that forwards signals and reaps processes
--interactive , -i Keep STDIN open even if not attached
--io-maxbandwidth Maximum IO bandwidth limit for the system drive (Windows only)
--io-maxiops Maximum IOps limit for the system drive (Windows only)
--ip IPv4 address (e.g., 172.30.100.104)
--ip6 IPv6 address (e.g., 2001:db8::33)
--ipc IPC mode to use
--isolation Container isolation technology
--kernel-memory Kernel memory limit
--label , -l Set meta data on a container
--label-file Read in a line delimited file of labels
--link Add link to another container
--link-local-ip Container IPv4/IPv6 link-local addresses
--log-driver Logging driver for the container
--log-opt Log driver options
--mac-address Container MAC address (e.g., 92:d0:c6:0a:29:33)
--memory , -m Memory limit
--memory-reservation Memory soft limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness -1 Tune container memory swappiness (0 to 100)
--mount Attach a filesystem mount to the container
--name Assign a name to the container
--net Connect a container to a network
--net-alias Add network-scoped alias for the container
--network Connect a container to a network
--network-alias Add network-scoped alias for the container
--no-healthcheck Disable any container-specified HEALTHCHECK
--oom-kill-disable Disable OOM Killer
--oom-score-adj Tune host's OOM preferences (-1000 to 1000)
--pid PID namespace to use
--pids-limit Tune container pids limit (set -1 for unlimited)
--platform Set platform if server is multi-platform capable
--privileged Give extended privileges to this container
--publish , -p Publish a container’s port(s) to the host
--publish-all , -P Publish all exposed ports to random ports
--pull missing Pull image before creating ("always"|"missing"|"never")
--read-only Mount the container's root filesystem as read only
--restart no Restart policy to apply when a container exits
--rm Automatically remove the container when it exits
--runtime Runtime to use for this container
--security-opt Security Options
--shm-size Size of /dev/shm
--stop-signal SIGTERM Signal to stop a container
--stop-timeout Timeout (in seconds) to stop a container
--storage-opt Storage driver options for the container
--sysctl Sysctl options
--tmpfs Mount a tmpfs directory
--tty , -t Allocate a pseudo-TTY
--ulimit Ulimit options
--user , -u Username or UID (format: <name|uid>[:<group|gid>])
--userns User namespace to use
--uts UTS namespace to use
--volume , -v Bind mount a volume
--volume-driver Optional volume driver for the container
--volumes-from Mount volumes from the specified container(s)
--workdir , -w Working directory inside the container

Examples

Create and start a container

The following example creates an interactive container with a pseudo-TTY attached, then starts the container and attaches to it:

$ docker container create -i -t --name mycontainer alpine
+6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752
+
+$ docker container start --attach -i mycontainer
+/ # echo hello world
+hello world
+

The above is the equivalent of a docker run:

$ docker run -it --name mycontainer2 alpine
+/ # echo hello world
+hello world
+

Initialize volumes

Container volumes are initialized during the docker create phase (i.e., docker run too). For example, this allows you to create the data volume container, and then use it from another container:

$ docker create -v /data --name data ubuntu
+
+240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57
+
+$ docker run --rm --volumes-from data ubuntu ls -la /data
+
+total 8
+drwxr-xr-x  2 root root 4096 Dec  5 04:10 .
+drwxr-xr-x 48 root root 4096 Dec  5 04:11 ..
+

Similarly, create a host directory bind mounted volume container, which can then be used from the subsequent container:

$ docker create -v /home/docker:/docker --name docker ubuntu
+
+9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03
+
+$ docker run --rm --volumes-from docker ubuntu ls -la /docker
+
+total 20
+drwxr-sr-x  5 1000 staff  180 Dec  5 04:00 .
+drwxr-xr-x 48 root root  4096 Dec  5 04:13 ..
+-rw-rw-r--  1 1000 staff 3833 Dec  5 04:01 .ash_history
+-rw-r--r--  1 1000 staff  446 Nov 28 11:51 .ashrc
+-rw-r--r--  1 1000 staff   25 Dec  5 04:00 .gitconfig
+drwxr-sr-x  3 1000 staff   60 Dec  1 03:28 .local
+-rw-r--r--  1 1000 staff  920 Nov 28 11:51 .profile
+drwx--S---  2 1000 staff  460 Dec  5 00:51 .ssh
+drwxr-xr-x 32 1000 staff 1140 Dec  5 04:01 docker
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fdiff%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fdiff%2Findex.html new file mode 100644 index 00000000..a99adac7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fdiff%2Findex.html @@ -0,0 +1,28 @@ +

docker diff


Inspect changes to files or directories on a container’s filesystem

Usage

$ docker diff CONTAINER
+

Description

List the changed files and directories in a container’s filesystem since the container was created. Three different types of change are tracked:

Symbol Description
A A file or directory was added
D A file or directory was deleted
C A file or directory was changed

You can use the full or shortened container ID or the container name set using docker run --name option.

For example uses of this command, refer to the examples section below.

Examples

Inspect the changes to an nginx container:

$ docker diff 1fdfd1f54c1b
+
+C /dev
+C /dev/console
+C /dev/core
+C /dev/stdout
+C /dev/fd
+C /dev/ptmx
+C /dev/stderr
+C /dev/stdin
+C /run
+A /run/nginx.pid
+C /var/lib/nginx/tmp
+A /var/lib/nginx/tmp/client_body
+A /var/lib/nginx/tmp/fastcgi
+A /var/lib/nginx/tmp/proxy
+A /var/lib/nginx/tmp/scgi
+A /var/lib/nginx/tmp/uwsgi
+C /var/log/nginx
+A /var/log/nginx/access.log
+A /var/log/nginx/error.log
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/diff/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fdocker%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fdocker%2Findex.html new file mode 100644 index 00000000..4644fc57 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fdocker%2Findex.html @@ -0,0 +1,6 @@ +

docker


The base command for the Docker CLI.

Child commands

Command Description
docker attach Attach local standard input, output, and error streams to a running container
docker build Build an image from a Dockerfile
docker builder Manage builds
docker checkpoint Manage checkpoints
docker commit Create a new image from a container’s changes
docker config Manage Docker configs
docker container Manage containers
docker context Manage contexts
docker cp Copy files/folders between a container and the local filesystem
docker create Create a new container
docker diff Inspect changes to files or directories on a container’s filesystem
docker events Get real time events from the server
docker exec Run a command in a running container
docker export Export a container’s filesystem as a tar archive
docker history Show the history of an image
docker image Manage images
docker images List images
docker import Import the contents from a tarball to create a filesystem image
docker info Display system-wide information
docker inspect Return low-level information on Docker objects
docker kill Kill one or more running containers
docker load Load an image from a tar archive or STDIN
docker login Log in to a Docker registry
docker logout Log out from a Docker registry
docker logs Fetch the logs of a container
docker manifest Manage Docker image manifests and manifest lists
docker network Manage networks
docker node Manage Swarm nodes
docker pause Pause all processes within one or more containers
docker plugin Manage plugins
docker port List port mappings or a specific mapping for the container
docker ps List containers
docker pull Pull an image or a repository from a registry
docker push Push an image or a repository to a registry
docker rename Rename a container
docker restart Restart one or more containers
docker rm Remove one or more containers
docker rmi Remove one or more images
docker run Run a command in a new container
docker save Save one or more images to a tar archive (streamed to STDOUT by default)
docker search Search the Docker Hub for images
docker secret Manage Docker secrets
docker service Manage services
docker stack Manage Docker stacks
docker start Start one or more stopped containers
docker stats Display a live stream of container(s) resource usage statistics
docker stop Stop one or more running containers
docker swarm Manage Swarm
docker system Manage Docker
docker tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
docker top Display the running processes of a container
docker trust Manage trust on Docker images
docker unpause Unpause all processes within one or more containers
docker update Update configuration of one or more containers
docker version Show the Docker version information
docker volume Manage volumes
docker wait Block until one or more containers stop, then print their exit codes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/docker/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fdockerd%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fdockerd%2Findex.html new file mode 100644 index 00000000..d8961c69 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fdockerd%2Findex.html @@ -0,0 +1,428 @@ +

dockerd

daemon

Usage: dockerd COMMAND
+
+A self-sufficient runtime for containers.
+
+Options:
+      --add-runtime runtime                   Register an additional OCI compatible runtime (default [])
+      --allow-nondistributable-artifacts list Allow push of nondistributable artifacts to registry
+      --api-cors-header string                Set CORS headers in the Engine API
+      --authorization-plugin list             Authorization plugins to load
+      --bip string                            Specify network bridge IP
+  -b, --bridge string                         Attach containers to a network bridge
+      --cgroup-parent string                  Set parent cgroup for all containers
+      --config-file string                    Daemon configuration file (default "/etc/docker/daemon.json")
+      --containerd string                     containerd grpc address
+      --containerd-namespace string           Containerd namespace to use (default "moby")
+      --containerd-plugins-namespace string   Containerd namespace to use for plugins (default "plugins.moby")
+      --cpu-rt-period int                     Limit the CPU real-time period in microseconds for the
+                                              parent cgroup for all containers
+      --cpu-rt-runtime int                    Limit the CPU real-time runtime in microseconds for the
+                                              parent cgroup for all containers
+      --cri-containerd                        start containerd with cri
+      --data-root string                      Root directory of persistent Docker state (default "/var/lib/docker")
+  -D, --debug                                 Enable debug mode
+      --default-address-pool pool-options     Default address pools for node specific local networks
+      --default-cgroupns-mode string          Default mode for containers cgroup namespace ("host" | "private") (default "host")
+      --default-gateway ip                    Container default gateway IPv4 address
+      --default-gateway-v6 ip                 Container default gateway IPv6 address
+      --default-ipc-mode string               Default mode for containers ipc ("shareable" | "private") (default "private")
+      --default-runtime string                Default OCI runtime for containers (default "runc")
+      --default-shm-size bytes                Default shm size for containers (default 64MiB)
+      --default-ulimit ulimit                 Default ulimits for containers (default [])
+      --dns list                              DNS server to use
+      --dns-opt list                          DNS options to use
+      --dns-search list                       DNS search domains to use
+      --exec-opt list                         Runtime execution options
+      --exec-root string                      Root directory for execution state files (default "/var/run/docker")
+      --experimental                          Enable experimental features
+      --fixed-cidr string                     IPv4 subnet for fixed IPs
+      --fixed-cidr-v6 string                  IPv6 subnet for fixed IPs
+  -G, --group string                          Group for the unix socket (default "docker")
+      --help                                  Print usage
+  -H, --host list                             Daemon socket(s) to connect to
+      --host-gateway-ip ip                    IP address that the special 'host-gateway' string in --add-host resolves to.
+                                              Defaults to the IP address of the default bridge
+      --icc                                   Enable inter-container communication (default true)
+      --init                                  Run an init in the container to forward signals and reap processes
+      --init-path string                      Path to the docker-init binary
+      --insecure-registry list                Enable insecure registry communication
+      --ip ip                                 Default IP when binding container ports (default 0.0.0.0)
+      --ip-forward                            Enable net.ipv4.ip_forward (default true)
+      --ip-masq                               Enable IP masquerading (default true)
+      --iptables                              Enable addition of iptables rules (default true)
+      --ip6tables                             Enable addition of ip6tables rules (default false)
+      --ipv6                                  Enable IPv6 networking
+      --label list                            Set key=value labels to the daemon
+      --live-restore                          Enable live restore of docker when containers are still running
+      --log-driver string                     Default driver for container logs (default "json-file")
+  -l, --log-level string                      Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info")
+      --log-opt map                           Default log driver options for containers (default map[])
+      --max-concurrent-downloads int          Set the max concurrent downloads for each pull (default 3)
+      --max-concurrent-uploads int            Set the max concurrent uploads for each push (default 5)
+      --max-download-attempts int             Set the max download attempts for each pull (default 5)
+      --metrics-addr string                   Set default address and port to serve the metrics api on
+      --mtu int                               Set the containers network MTU
+      --network-control-plane-mtu int         Network Control plane MTU (default 1500)
+      --no-new-privileges                     Set no-new-privileges by default for new containers
+      --node-generic-resource list            Advertise user-defined resource
+      --oom-score-adjust int                  Set the oom_score_adj for the daemon (default -500)
+  -p, --pidfile string                        Path to use for daemon PID file (default "/var/run/docker.pid")
+      --raw-logs                              Full timestamps without ANSI coloring
+      --registry-mirror list                  Preferred Docker registry mirror
+      --rootless                              Enable rootless mode; typically used with RootlessKit
+      --seccomp-profile string                Path to seccomp profile
+      --selinux-enabled                       Enable selinux support
+      --shutdown-timeout int                  Set the default shutdown timeout (default 15)
+  -s, --storage-driver string                 Storage driver to use
+      --storage-opt list                      Storage driver options
+      --swarm-default-advertise-addr string   Set default address or interface for swarm advertised address
+      --tls                                   Use TLS; implied by --tlsverify
+      --tlscacert string                      Trust certs signed only by this CA (default "~/.docker/ca.pem")
+      --tlscert string                        Path to TLS certificate file (default "~/.docker/cert.pem")
+      --tlskey string                         Path to TLS key file (default "~/.docker/key.pem")
+      --tlsverify                             Use TLS and verify the remote
+      --userland-proxy                        Use userland proxy for loopback traffic (default true)
+      --userland-proxy-path string            Path to the userland proxy binary
+      --userns-remap string                   User/Group setting for user namespaces
+  -v, --version                               Print version information and quit
+

Options with [] may be specified multiple times.
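
For example, a minimal sketch (the values are hypothetical): list options such as --dns and --label can be repeated to build up a list:

$ sudo dockerd --dns 8.8.8.8 --dns 1.1.1.1 --label env=prod --label tier=backend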

Description

dockerd is the persistent process that manages containers. Docker uses different binaries for the daemon and client. To run the daemon you type dockerd.

To run the daemon with debug output, use dockerd --debug or add "debug": true to the daemon.json file.

Enabling experimental features

Enable experimental features by starting dockerd with the --experimental flag or adding "experimental": true to the daemon.json file.
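
A minimal daemon.json sketch combining both settings (typically applied by restarting the daemon):

{
  "debug": true,
  "experimental": true
}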

Environment variables

For easy reference, the following list of environment variables is supported by the dockerd command line:

Examples

Daemon socket option

The Docker daemon can listen for Docker Engine API requests via three different types of Socket: unix, tcp, and fd.

By default, a unix domain socket (or IPC socket) is created at /var/run/docker.sock, requiring either root permission, or docker group membership.

If you need to access the Docker daemon remotely, you need to enable the tcp socket. Beware that the default setup provides unencrypted and unauthenticated direct access to the Docker daemon, and should be secured either using the built-in HTTPS encrypted socket or by putting a secure web proxy in front of it. You can listen on port 2375 on all network interfaces with -H tcp://0.0.0.0:2375, or on a particular network interface using its IP address: -H tcp://192.168.59.103:2375. It is conventional to use port 2375 for unencrypted, and port 2376 for encrypted, communication with the daemon.

Note

If you’re using an HTTPS encrypted socket, keep in mind that only TLS1.0 and greater are supported. Protocols SSLv3 and under are not supported anymore for security reasons.

On Systemd based systems, you can communicate with the daemon via Systemd socket activation, use dockerd -H fd://. Using fd:// will work perfectly for most setups but you can also specify individual sockets: dockerd -H fd://3. If the specified socket activated files aren’t found, then Docker will exit. You can find examples of using Systemd socket activation with Docker and Systemd in the Docker source tree.

You can configure the Docker daemon to listen to multiple sockets at the same time using multiple -H options:

The example below runs the daemon listening on the default Unix socket, and on 2 specific IP addresses on this host:

$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2
+

The Docker client will honor the DOCKER_HOST environment variable to set the -H flag for the client. Use one of the following commands:

$ docker -H tcp://0.0.0.0:2375 ps
+
$ export DOCKER_HOST="tcp://0.0.0.0:2375"
+
+$ docker ps
+

Setting the DOCKER_TLS_VERIFY environment variable to any value other than the empty string is equivalent to setting the --tlsverify flag. The following are equivalent:

$ docker --tlsverify ps
+# or
+$ export DOCKER_TLS_VERIFY=1
+$ docker ps
+

The Docker client will honor the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables (or the lowercase versions thereof). HTTPS_PROXY takes precedence over HTTP_PROXY.

The Docker client supports connecting to a remote daemon via SSH:

$ docker -H ssh://me@example.com:22 ps
+$ docker -H ssh://me@example.com ps
+$ docker -H ssh://example.com ps
+

To use SSH connection, you need to set up ssh so that it can reach the remote host with public key authentication. Password authentication is not supported. If your key is protected with passphrase, you need to set up ssh-agent.
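
A hedged sketch of such a setup (the key path and remote host are illustrative): load a key into ssh-agent and point the client at the remote daemon over SSH:

$ eval "$(ssh-agent -s)"
$ ssh-add ~/.ssh/id_ed25519
$ DOCKER_HOST=ssh://me@example.com docker ps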

Bind Docker to another host/port or a Unix socket

Warning

Changing the default docker daemon binding to a TCP port or Unix docker user group will increase your security risks by allowing non-root users to gain root access on the host. Make sure you control access to docker. If you are binding to a TCP port, anyone with access to that port has full Docker access; so it is not advisable on an open network.

With -H it is possible to make the Docker daemon listen on a specific IP and port. By default, it will listen on unix:///var/run/docker.sock to allow only local connections by the root user. You could set it to 0.0.0.0:2375 or a specific host IP to give access to everybody, but that is not recommended because then it is trivial for someone to gain root access to the host where the daemon is running.

Similarly, the Docker client can use -H to connect to a custom port. The Docker client will default to connecting to unix:///var/run/docker.sock on Linux, and tcp://127.0.0.1:2376 on Windows.

-H accepts host and port assignment in the following format:

tcp://[host]:[port][path] or unix://path
+

For example:

-H, when empty, will default to the same value as when no -H was passed in.

-H also accepts short form for TCP bindings: host: or host:port or :port

Run Docker in daemon mode:

$ sudo <path to>/dockerd -H 0.0.0.0:5555 &
+

Download an ubuntu image:

$ docker -H :5555 pull ubuntu
+

You can use multiple -H, for example, if you want to listen on both TCP and a Unix socket

$ sudo dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock &
+# Download an ubuntu image, use default Unix socket
+$ docker pull ubuntu
+# OR use the TCP port
+$ docker -H tcp://127.0.0.1:2375 pull ubuntu
+

Daemon storage-driver

On Linux, the Docker daemon has support for several different image layer storage drivers: aufs, devicemapper, btrfs, zfs, overlay, overlay2, and fuse-overlayfs.

The aufs driver is the oldest, but is based on a Linux kernel patch-set that is unlikely to be merged into the main kernel. It is also known to cause some serious kernel crashes. However, aufs allows containers to share executable and shared library memory, so it is a useful choice when running thousands of containers with the same program or libraries.

The devicemapper driver uses thin provisioning and Copy on Write (CoW) snapshots. For each devicemapper graph location – typically /var/lib/docker/devicemapper – a thin pool is created based on two block devices, one for data and one for metadata. By default, these block devices are created automatically by using loopback mounts of automatically created sparse files. Refer to Devicemapper options below for a way to customize this setup. jpetazzo’s “Resizing Docker containers with the Device Mapper plugin” article explains how to tune your existing setup without the use of options.

The btrfs driver is very fast for docker build - but like devicemapper does not share executable memory between devices. Use dockerd --storage-driver btrfs --data-root /mnt/btrfs_partition.

The zfs driver is probably not as fast as btrfs but has a longer track record on stability. Thanks to Single Copy ARC shared blocks between clones will be cached only once. Use dockerd -s zfs. To select a different zfs filesystem set zfs.fsname option as described in ZFS options.

The overlay driver is a very fast union filesystem, merged into the mainline Linux kernel as of 3.18.0. overlay also supports page cache sharing: multiple containers accessing the same file can share a single page cache entry (or entries), which makes overlay as memory-efficient as the aufs driver. Call dockerd -s overlay to use it.

overlay2 uses the same fast union filesystem but takes advantage of additional features added in Linux kernel 4.0 to avoid excessive inode consumption. Call dockerd -s overlay2 to use it.

Note

The overlay storage driver can cause excessive inode consumption (especially as the number of images grows). We recommend using the overlay2 storage driver instead.

Note

Both overlay and overlay2 are currently unsupported on btrfs or any Copy on Write filesystem and should only be used over ext4 partitions.

The fuse-overlayfs driver is similar to overlay2 but works in userspace. The fuse-overlayfs driver is expected to be used for Rootless mode.

On Windows, the Docker daemon supports a single image layer storage driver depending on the image platform: windowsfilter for Windows images, and lcow for Linux containers on Windows.

Options per storage driver

A particular storage driver can be configured with options specified with --storage-opt flags. Options for devicemapper are prefixed with dm, options for zfs start with zfs, options for btrfs start with btrfs, and options for lcow start with lcow.

Devicemapper options

This is an example of the configuration file for devicemapper on Linux:

{
+  "storage-driver": "devicemapper",
+  "storage-opts": [
+    "dm.thinpooldev=/dev/mapper/thin-pool",
+    "dm.use_deferred_deletion=true",
+    "dm.use_deferred_removal=true"
+  ]
+}
+
dm.thinpooldev

Specifies a custom block storage device to use for the thin pool.

If using a block device for device mapper storage, it is best to use lvm to create and manage the thin-pool volume. This volume is then handed to Docker to exclusively create snapshot volumes needed for images and containers.

Managing the thin-pool outside of Engine makes for the most feature-rich method of having Docker utilize device mapper thin provisioning as the backing storage for Docker containers. The highlights of the lvm-based thin-pool management feature include: automatic or interactive thin-pool resize support, dynamically changing thin-pool features, automatic thinp metadata checking when lvm activates the thin-pool, etc.

As a fallback if no thin pool is provided, loopback files are created. Loopback is very slow, but can be used without any pre-configuration of storage. It is strongly recommended that you do not use loopback in production. Ensure your Engine daemon has a --storage-opt dm.thinpooldev argument provided.

Example:
$ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool
+
dm.directlvm_device

As an alternative to providing a thin pool as above, Docker can set up a block device for you.

Example:
$ sudo dockerd --storage-opt dm.directlvm_device=/dev/xvdf
+
dm.thinp_percent

Sets the percentage of passed in block device to use for storage.

Example:
$ sudo dockerd --storage-opt dm.thinp_percent=95
+
dm.thinp_metapercent

Sets the percentage of the passed in block device to use for metadata storage.

Example:
$ sudo dockerd --storage-opt dm.thinp_metapercent=1
+
dm.thinp_autoextend_threshold

Sets the value of the percentage of space used before lvm attempts to autoextend the available space [100 = disabled]

Example:
$ sudo dockerd --storage-opt dm.thinp_autoextend_threshold=80
+
dm.thinp_autoextend_percent

Sets the percentage value to increase the thin pool by when lvm attempts to autoextend the available space [100 = disabled]

Example:
$ sudo dockerd --storage-opt dm.thinp_autoextend_percent=20
+
dm.basesize

Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note, thin devices are inherently “sparse”, so a 10G device which is mostly empty doesn’t use 10 GB of space on the pool. However, the filesystem will use more space for the empty case the larger the device is.

The base device size can be increased at daemon restart which will allow all future images and containers (based on those new images) to be of the new base device size.

Examples
$ sudo dockerd --storage-opt dm.basesize=50G
+

This will increase the base device size to 50G. The Docker daemon will throw an error if the existing base device size is larger than 50G. A user can use this option to expand the base device size; however, shrinking is not permitted.

This value affects the system-wide “base” empty filesystem that may already be initialized and inherited by pulled images. Typically, a change to this value requires additional steps to take effect:

$ sudo service docker stop
+
+$ sudo rm -rf /var/lib/docker
+
+$ sudo service docker start
+
dm.loopdatasize

Note

This option configures devicemapper loopback, which should not be used in production.

Specifies the size to use when creating the loopback file for the “data” device which is used for the thin pool. The default size is 100G. The file is sparse, so it will not initially take up this much space.

Example
$ sudo dockerd --storage-opt dm.loopdatasize=200G
+
dm.loopmetadatasize

Note

This option configures devicemapper loopback, which should not be used in production.

Specifies the size to use when creating the loopback file for the “metadata” device which is used for the thin pool. The default size is 2G. The file is sparse, so it will not initially take up this much space.

Example
$ sudo dockerd --storage-opt dm.loopmetadatasize=4G
+
dm.fs

Specifies the filesystem type to use for the base device. The supported options are “ext4” and “xfs”. The default is “xfs”

Example
$ sudo dockerd --storage-opt dm.fs=ext4
+
dm.mkfsarg

Specifies extra mkfs arguments to be used when creating the base device.

Example
$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"
+
dm.mountopt

Specifies extra mount options used when mounting the thin devices.

Example
$ sudo dockerd --storage-opt dm.mountopt=nodiscard
+
dm.datadev

(Deprecated, use dm.thinpooldev)

Specifies a custom blockdevice to use for data for the thin pool.

If using a block device for device mapper storage, ideally both datadev and metadatadev should be specified to completely avoid using the loopback device.

Example
$ sudo dockerd \
+      --storage-opt dm.datadev=/dev/sdb1 \
+      --storage-opt dm.metadatadev=/dev/sdc1
+
dm.metadatadev

(Deprecated, use dm.thinpooldev)

Specifies a custom blockdevice to use for metadata for the thin pool.

For best performance the metadata should be on a different spindle than the data, or even better on an SSD.

If setting up a new metadata pool it is required to be valid. This can be achieved by zeroing the first 4k to indicate empty metadata, like this:

$ dd if=/dev/zero of=$metadata_dev bs=4096 count=1
+
Example
$ sudo dockerd \
+      --storage-opt dm.datadev=/dev/sdb1 \
+      --storage-opt dm.metadatadev=/dev/sdc1
+
dm.blocksize

Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K.

Example
$ sudo dockerd --storage-opt dm.blocksize=512K
+
dm.blkdiscard

Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is required to resparsify the loopback file on image/container removal.

Disabling this on loopback can lead to much faster container removal times, but space used in the /var/lib/docker directory will not be returned to the system for other use when containers are removed.

Examples
$ sudo dockerd --storage-opt dm.blkdiscard=false
+
dm.override_udev_sync_check

Overrides the udev synchronization checks between devicemapper and udev. udev is the device manager for the Linux kernel.

To view the udev sync support of a Docker daemon that is using the devicemapper driver, run:

$ docker info
+<...>
+Udev Sync Supported: true
+<...>
+

When udev sync support is true, then devicemapper and udev can coordinate the activation and deactivation of devices for containers.

When udev sync support is false, a race condition occurs between the devicemapper and udev during create and cleanup. The race condition results in errors and failures. (For information on these failures, see docker#4036)

To allow the docker daemon to start, regardless of udev sync not being supported, set dm.override_udev_sync_check to true:

$ sudo dockerd --storage-opt dm.override_udev_sync_check=true
+

When this value is true, the devicemapper continues and simply warns you the errors are happening.

Note

The ideal is to pursue a docker daemon and environment that does support synchronizing with udev. For further discussion on this topic, see docker#4036. Otherwise, set this flag for migrating existing Docker daemons to a daemon with a supported environment.

dm.use_deferred_removal

Enables use of deferred device removal if libdm and the kernel driver support the mechanism.

Deferred device removal means that if a device is busy when it is being removed or deactivated, a deferred removal is scheduled for that device. The device is then removed automatically when its last user exits.

For example, when a container exits, its associated thin device is removed. If that device has leaked into some other mount namespace and can’t be removed, the container exit still succeeds and this option causes the system to schedule the device for deferred removal. It does not wait in a loop trying to remove a busy device.

Example
$ sudo dockerd --storage-opt dm.use_deferred_removal=true
+
dm.use_deferred_deletion

Enables use of deferred device deletion for thin pool devices. By default, thin pool device deletion is synchronous. Before a container is deleted, the Docker daemon removes any associated devices. If the storage driver cannot remove a device, the container deletion fails and the daemon returns an error:

Error deleting container: Error response from daemon: Cannot destroy container
+

To avoid this failure, enable both deferred device deletion and deferred device removal on the daemon.

$ sudo dockerd \
+      --storage-opt dm.use_deferred_deletion=true \
+      --storage-opt dm.use_deferred_removal=true
+

With these two options enabled, if a device is busy when the driver is deleting a container, the driver marks the device as deleted. Later, when the device isn’t in use, the driver deletes it.

In general it should be safe to enable this option by default. It will help when a mount point is unintentionally leaked across multiple mount namespaces.

dm.min_free_space

Specifies the minimum free space percentage in a thin pool required for new device creation to succeed. This check applies to both free data space and free metadata space. Valid values are from 0% to 99%. A value of 0% disables the free space checking logic. If the user does not specify a value for this option, the Engine uses a default value of 10%.

Whenever a new thin pool device is created (during docker pull or during container creation), the Engine checks whether the minimum free space is available. If sufficient space is unavailable, device creation fails and any relevant docker operation fails.

To recover from this error, you must create more free space in the thin pool, either by deleting some images and containers from the thin pool or by adding more storage to it.

To add more space to a LVM (logical volume management) thin pool, just add more storage to the volume group container thin pool; this should automatically resolve any errors. If your configuration uses loop devices, then stop the Engine daemon, grow the size of loop files and restart the daemon to resolve the issue.

Example
$ sudo dockerd --storage-opt dm.min_free_space=10%
+
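
To grow an LVM-backed thin pool as described above, a hedged sketch (the volume group, thin pool, and device names are hypothetical): add a new physical volume to the volume group, then extend the pool:

$ sudo vgextend docker-vg /dev/sdd
$ sudo lvextend -l +100%FREE docker-vg/thinpool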
dm.xfs_nospace_max_retries

Specifies the maximum number of retries XFS should attempt to complete IO when ENOSPC (no space) error is returned by underlying storage device.

By default, XFS retries infinitely for IO to finish, which can result in an unkillable process. To change this behavior, set xfs_nospace_max_retries to 0: XFS will then stop retrying IO after getting ENOSPC and will shut down the filesystem.

Example
$ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0
+
dm.libdm_log_level

Specifies the maximum libdm log level that will be forwarded to the dockerd log (as specified by --log-level). This option is primarily intended for debugging problems involving libdm. Using values other than the defaults may cause false-positive warnings to be logged.

Values specified must fall within the range of valid libdm log levels. At the time of writing, the following is the list of libdm log levels as well as their corresponding levels when output by dockerd.

libdm Level Value --log-level
_LOG_FATAL 2 error
_LOG_ERR 3 error
_LOG_WARN 4 warn
_LOG_NOTICE 5 info
_LOG_INFO 6 info
_LOG_DEBUG 7 debug
Example
$ sudo dockerd \
+      --log-level debug \
+      --storage-opt dm.libdm_log_level=7
+

ZFS options

zfs.fsname

Sets the zfs filesystem under which Docker will create its own datasets. By default, Docker picks up the zfs filesystem where the Docker graph (/var/lib/docker) is located.

Example
$ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker
+

Btrfs options

btrfs.min_space

Specifies the minimum size to use when creating the subvolume which is used for containers. If the user uses a disk quota for btrfs when creating or running a container with the --storage-opt size option, Docker ensures the size cannot be smaller than btrfs.min_space.

Example
$ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G
+

Overlay2 options

overlay2.override_kernel_check

Overrides the Linux kernel version check allowing overlay2. Support for specifying multiple lower directories needed by overlay2 was added to the Linux kernel in 4.0.0. However, some older kernel versions may be patched to add multiple lower directory support for OverlayFS. This option should only be used after verifying this support exists in the kernel. Applying this option on a kernel without this support will cause failures on mount.

overlay2.size

Sets the default max size of the container. It is supported only when the backing fs is xfs and mounted with the pquota mount option. Under these conditions the user can pass any size less than the backing fs size.

Example
$ sudo dockerd -s overlay2 --storage-opt overlay2.size=1G
+

Windowsfilter options

size

Specifies the size to use when creating the sandbox which is used for containers. Defaults to 20G.

Example
C:\> dockerd --storage-opt size=40G
+

LCOW (Linux Containers on Windows) options

lcow.globalmode

Specifies whether the daemon instantiates utility VM instances as required (recommended, and the default if omitted), or uses a single global utility VM (better performance, but with security implications; not recommended for production deployments).

Example
C:\> dockerd --storage-opt lcow.globalmode=false
+
lcow.kirdpath

Specifies the folder path to the location of a pair of kernel and initrd files used for booting a utility VM. Defaults to %ProgramFiles%\Linux Containers.

Example
C:\> dockerd --storage-opt lcow.kirdpath=c:\path\to\files
+
lcow.kernel

Specifies the filename of a kernel file located in the lcow.kirdpath path. Defaults to bootx64.efi.

Example
C:\> dockerd --storage-opt lcow.kernel=kernel.efi
+
lcow.initrd

Specifies the filename of an initrd file located in the lcow.kirdpath path. Defaults to initrd.img.

Example
C:\> dockerd --storage-opt lcow.initrd=myinitrd.img
+
lcow.bootparameters

Specifies additional boot parameters for booting utility VMs when in kernel/initrd mode. Ignored if the utility VM is booting from VHD. These settings are kernel specific.

Example
C:\> dockerd --storage-opt "lcow.bootparameters='option=value'"
+
lcow.vhdx

Specifies a custom VHDX to boot a utility VM, as an alternative to kernel and initrd booting. Defaults to uvm.vhdx under lcow.kirdpath.

Example
C:\> dockerd --storage-opt lcow.vhdx=custom.vhdx
+
lcow.timeout

Specifies the timeout for utility VM operations in seconds. Defaults to 300.

Example
C:\> dockerd --storage-opt lcow.timeout=240
+
lcow.sandboxsize

Specifies the size in GB to use when creating the sandbox which is used for containers. Defaults to 20. Cannot be less than 20.

Example
C:\> dockerd --storage-opt lcow.sandboxsize=40
+

Docker runtime execution options

The Docker daemon relies on an OCI-compliant runtime (invoked via the containerd daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux.

By default, the Docker daemon automatically starts containerd. If you want to control containerd startup, manually start containerd and pass the path to the containerd socket using the --containerd flag. For example:

$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock
+

Runtimes can be registered with the daemon either via the configuration file or using the --add-runtime command line argument.

The following is an example that adds two runtimes via the configuration file:

{
+  "default-runtime": "runc",
+  "runtimes": {
+    "custom": {
+      "path": "/usr/local/bin/my-runc-replacement",
+      "runtimeArgs": [
+        "--debug"
+      ]
+    },
+    "runc": {
+      "path": "runc"
+    }
+  }
+}
+

This is the same example via the command line:

$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement
+

Note

Defining runtime arguments via the command line is not supported.

Options for the runtime

You can configure the runtime using options specified with the --exec-opt flag. All of the flag's options use the native prefix. Currently, only a single option, native.cgroupdriver, is available.

The native.cgroupdriver option specifies the management of the container's cgroups. You can only specify cgroupfs or systemd. If you specify systemd and it is not available, the system errors out. If you omit the native.cgroupdriver option, cgroupfs is used on cgroup v1 hosts, and systemd is used on cgroup v2 hosts where systemd is available.

This example sets the cgroupdriver to systemd:

$ sudo dockerd --exec-opt native.cgroupdriver=systemd
+

Setting this option applies to all containers the daemon launches.

Windows containers also make use of --exec-opt for a special purpose: specifying the default container isolation technology. For example:

> dockerd --exec-opt isolation=hyperv
+

This makes hyperv the default isolation technology on Windows. If no isolation value is specified on daemon start, the default is hyperv on Windows client and process on Windows server.

Daemon DNS options

To set the DNS server for all Docker containers, use:

$ sudo dockerd --dns 8.8.8.8
+

To set the DNS search domain for all Docker containers, use:

$ sudo dockerd --dns-search example.com
+

Allow push of nondistributable artifacts

Some images (e.g., Windows base images) contain artifacts whose distribution is restricted by license. When these images are pushed to a registry, restricted artifacts are not included.

To override this behavior for specific registries, use the --allow-nondistributable-artifacts option in one of the following forms:
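For example, a minimal sketch using the hostname:port form (the registry address is illustrative):

$ sudo dockerd --allow-nondistributable-artifacts myregistry:5000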

This option can be used multiple times.

This option is useful when pushing images containing nondistributable artifacts to a registry on an air-gapped network so hosts on that network can pull the images without connecting to another server.

Warning: Nondistributable artifacts typically have restrictions on how and where they can be distributed and shared. Only use this feature to push artifacts to private registries and ensure that you are in compliance with any terms that cover redistributing nondistributable artifacts.

Insecure registries

Docker considers a private registry either secure or insecure. In the rest of this section, registry refers to a private registry, and myregistry:5000 is a placeholder example of a private registry.

A secure registry uses TLS and a copy of its CA certificate is placed on the Docker host at /etc/docker/certs.d/myregistry:5000/ca.crt. An insecure registry is either not using TLS (i.e., listening on plain text HTTP), or is using TLS with a CA certificate not known by the Docker daemon. The latter can happen when the certificate was not found under /etc/docker/certs.d/myregistry:5000/, or if the certificate verification failed (i.e., wrong CA).

By default, Docker assumes all registries except local ones (see local registries below) are secure. Communicating with an insecure registry is not possible if Docker assumes that registry is secure. In order to communicate with an insecure registry, the Docker daemon requires the --insecure-registry flag in one of the following two forms:
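A hedged sketch of the hostname:port form, using this section's placeholder registry (an address range in CIDR notation can be given in the same position):

$ sudo dockerd --insecure-registry myregistry:5000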

The flag can be used multiple times to allow multiple registries to be marked as insecure.

If an insecure registry is not marked as insecure, docker pull, docker push, and docker search result in an error message prompting the user to either secure the registry or pass the --insecure-registry flag to the Docker daemon as described above.

Local registries, whose IP address falls in the 127.0.0.0/8 range, are automatically marked as insecure as of Docker 1.3.2. It is not recommended to rely on this, as it may change in the future.

Enabling --insecure-registry, i.e., allowing unencrypted and/or untrusted communication, can be useful when running a local registry. However, because its use creates security vulnerabilities it should ONLY be enabled for testing purposes. For increased security, users should add their CA to their system’s list of trusted CAs instead of enabling --insecure-registry.

Legacy Registries

Operations against registries supporting only the legacy v1 protocol are no longer supported. Specifically, the daemon will not attempt to push, pull, or log in to v1 registries. The exception is search, which can still be performed on v1 registries.

Running a Docker daemon behind an HTTPS_PROXY

When running inside a LAN that uses an HTTPS proxy, the Docker Hub certificates will be replaced by the proxy’s certificates. These certificates need to be added to your Docker host’s configuration:

  1. Install the ca-certificates package for your distribution
  2. Ask your network admin for the proxy’s CA certificate and append it to /etc/pki/tls/certs/ca-bundle.crt
  3. Then start your Docker daemon with HTTPS_PROXY=http://username:password@proxy:port/ dockerd. The username: and password@ parts are optional, and are only needed if your proxy is set up to require authentication.

This only adds the proxy and authentication to the Docker daemon’s requests; your docker builds and running containers need extra configuration to use the proxy.

Default ulimit settings

--default-ulimit allows you to set the default ulimit options to use for all containers. It takes the same options as --ulimit for docker run. If these defaults are not set, ulimit settings are inherited from the Docker daemon unless they are set on docker run. Any --ulimit options passed to docker run override these defaults.
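For example, a minimal sketch (the nofile values are illustrative, not recommendations):

$ sudo dockerd --default-ulimit nofile=64000:64000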

Be careful setting nproc with the ulimit flag as nproc is designed by Linux to set the maximum number of processes available to a user, not to a container. For details please check the run reference.

Node discovery

The --cluster-advertise option specifies the host:port or interface:port combination that this particular daemon instance should use when advertising itself to the cluster. The daemon is reached by remote hosts through this value. If you specify an interface, make sure it includes the IP address of the actual Docker host. For Engine installation created through docker-machine, the interface is typically eth1.

The daemon uses libkv to advertise the node within the cluster. Some key-value backends support mutual TLS. The client TLS settings used by the daemon can be configured using the --cluster-store-opt flag, specifying the paths to PEM-encoded files. For example:

$ sudo dockerd \
+    --cluster-advertise 192.168.1.2:2376 \
+    --cluster-store etcd://192.168.1.2:2379 \
+    --cluster-store-opt kv.cacertfile=/path/to/ca.pem \
+    --cluster-store-opt kv.certfile=/path/to/cert.pem \
+    --cluster-store-opt kv.keyfile=/path/to/key.pem
+

The currently supported cluster store options are:

Option Description
discovery.heartbeat Specifies the heartbeat timer in seconds which is used by the daemon as a keepalive mechanism to make sure the discovery module treats the node as alive in the cluster. If not configured, the default value is 20 seconds.
discovery.ttl Specifies the TTL (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds.
kv.cacertfile Specifies the path to a local file with PEM encoded CA certificates to trust.
kv.certfile Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store.
kv.keyfile Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store.
kv.path Specifies the path in the Key/Value store. If not configured, the default value is ‘docker/nodes’.

Access authorization

Docker’s access authorization can be extended by authorization plugins that your organization can purchase or build themselves. You can install one or more authorization plugins when you start the Docker daemon using the --authorization-plugin=PLUGIN_ID option.

$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
+

The PLUGIN_ID value is either the plugin’s name or a path to its specification file. The plugin’s implementation determines whether you can specify a name or path. Consult with your Docker administrator to get information about the plugins available to you.

Once a plugin is installed, requests made to the daemon through the command line or Docker’s Engine API are allowed or denied by the plugin. If you have multiple plugins installed, each plugin, in order, must allow the request for it to complete.

For information about how to create an authorization plugin, refer to the authorization plugin section.

Daemon user namespace options

The Linux kernel user namespace support provides additional security by enabling a process, and therefore a container, to have a unique range of user and group IDs which are outside the traditional user and group range utilized by the host system. Potentially the most important security improvement is that, by default, container processes running as the root user will have expected administrative privilege (with some restrictions) inside the container but will effectively be mapped to an unprivileged uid on the host.

For details about how to use this feature, as well as limitations, see Isolate containers with a user namespace.

Miscellaneous options

IP masquerading uses address translation to allow containers without a public IP to talk to other machines on the Internet. This may interfere with some network topologies and can be disabled with --ip-masq=false.
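For example:

$ sudo dockerd --ip-masq=false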

Docker supports softlinks for the Docker data directory (/var/lib/docker) and for /var/lib/docker/tmp. The DOCKER_TMPDIR and the data directory can be set like this:

$ DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd --data-root /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
+

or

$ export DOCKER_TMPDIR=/mnt/disk2/tmp
+$ /usr/local/bin/dockerd --data-root /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
+

Default cgroup parent

The --cgroup-parent option allows you to set the default cgroup parent to use for containers. If this option is not set, it defaults to /docker for fs cgroup driver and system.slice for systemd cgroup driver.

If the cgroup has a leading forward slash (/), the cgroup is created under the root cgroup, otherwise the cgroup is created under the daemon cgroup.

Assuming the daemon is running in cgroup daemoncgroup, --cgroup-parent=/foobar creates a cgroup in /sys/fs/cgroup/memory/foobar, whereas using --cgroup-parent=foobar creates the cgroup in /sys/fs/cgroup/memory/daemoncgroup/foobar.
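For example, a minimal sketch of the first case above (cgroupfs driver assumed):

$ sudo dockerd --cgroup-parent=/foobar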

The systemd cgroup driver has different rules for --cgroup-parent. Systemd represents hierarchy by slice and the name of the slice encodes the location in the tree. So --cgroup-parent for systemd cgroups should be a slice name. A name can consist of a dash-separated series of names, which describes the path to the slice from the root slice. For example, --cgroup-parent=user-a-b.slice means the memory cgroup for the container is created in /sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-<id>.scope.

This setting can also be set per container, using the --cgroup-parent option on docker create and docker run, and takes precedence over the --cgroup-parent option on the daemon.

Daemon metrics

The --metrics-addr option takes a TCP address on which to serve the metrics API. This feature is still experimental; therefore, the daemon must be running in experimental mode for it to work.

To serve the metrics API on localhost:9323 you would specify --metrics-addr 127.0.0.1:9323, allowing you to make requests on the API at 127.0.0.1:9323/metrics to receive metrics in the prometheus format.
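For example, a minimal sketch (assumes experimental mode is enabled via the flag shown):

$ sudo dockerd --experimental --metrics-addr 127.0.0.1:9323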

Port 9323 is the default port associated with Docker metrics to avoid collisions with other prometheus exporters and services.

If you are running a prometheus server you can add this address to your scrape configs to have prometheus collect metrics on Docker. For more information on prometheus refer to the prometheus website.

scrape_configs:
+  - job_name: 'docker'
+    static_configs:
+      - targets: ['127.0.0.1:9323']
+

Please note that this feature is still marked as experimental: metrics and metric names could change while the feature remains experimental. Please provide feedback on what you would like to see collected in the API.

Node Generic Resources

The --node-generic-resources option takes a list of key-value pairs (key=value) that allows you to advertise user-defined resources in a swarm cluster.

The current expected use case is to advertise NVIDIA GPUs so that services requesting NVIDIA-GPU=[0-16] can land on a node that has enough GPUs for the task to run.

Example of usage:

{
+  "node-generic-resources": [
+    "NVIDIA-GPU=UUID1",
+    "NVIDIA-GPU=UUID2"
+  ]
+}
+
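The same resources can presumably also be advertised by repeating the flag on the command line; a sketch (the UUIDs are placeholders):

$ sudo dockerd --node-generic-resources "NVIDIA-GPU=UUID1" --node-generic-resources "NVIDIA-GPU=UUID2"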

Daemon configuration file

The --config-file option allows you to set any configuration option for the daemon in a JSON format. This file uses the same flag names as keys, except for flags that allow several entries, where it uses the plural of the flag name, e.g., labels for the label flag.

The options set in the configuration file must not conflict with options set via flags. The docker daemon fails to start if an option is duplicated between the file and the flags, regardless of their value. This is done to avoid silently ignoring changes introduced in configuration reloads. For example, the daemon fails to start if you set daemon labels in the configuration file and also set daemon labels via the --label flag. Options that are not present in the file are ignored when the daemon starts.
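As a hedged illustration of such a conflict (the label values are made up), a daemon.json containing:

{
  "labels": ["foo=bar"]
}

combined with starting the daemon as dockerd --label baz=qux prevents the daemon from starting, because labels would then be configured both in the file and via flags.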

On Linux

The default location of the configuration file on Linux is /etc/docker/daemon.json. The --config-file flag can be used to specify a non-default location.

This is a full example of the allowed configuration options on Linux:

{
+  "allow-nondistributable-artifacts": [],
+  "api-cors-header": "",
+  "authorization-plugins": [],
+  "bip": "",
+  "bridge": "",
+  "cgroup-parent": "",
+  "cluster-advertise": "",
+  "cluster-store": "",
+  "cluster-store-opts": {},
+  "containerd": "/run/containerd/containerd.sock",
+  "containerd-namespace": "docker",
+  "containerd-plugin-namespace": "docker-plugins",
+  "data-root": "",
+  "debug": true,
+  "default-address-pools": [
+    {
+      "base": "172.30.0.0/16",
+      "size": 24
+    },
+    {
+      "base": "172.31.0.0/16",
+      "size": 24
+    }
+  ],
+  "default-cgroupns-mode": "private",
+  "default-gateway": "",
+  "default-gateway-v6": "",
+  "default-runtime": "runc",
+  "default-shm-size": "64M",
+  "default-ulimits": {
+    "nofile": {
+      "Hard": 64000,
+      "Name": "nofile",
+      "Soft": 64000
+    }
+  },
+  "dns": [],
+  "dns-opts": [],
+  "dns-search": [],
+  "exec-opts": [],
+  "exec-root": "",
+  "experimental": false,
+  "features": {},
+  "fixed-cidr": "",
+  "fixed-cidr-v6": "",
+  "group": "",
+  "hosts": [],
+  "icc": false,
+  "init": false,
+  "init-path": "/usr/libexec/docker-init",
+  "insecure-registries": [],
+  "ip": "0.0.0.0",
+  "ip-forward": false,
+  "ip-masq": false,
+  "iptables": false,
+  "ip6tables": false,
+  "ipv6": false,
+  "labels": [],
+  "live-restore": true,
+  "log-driver": "json-file",
+  "log-level": "",
+  "log-opts": {
+    "cache-disabled": "false",
+    "cache-max-file": "5",
+    "cache-max-size": "20m",
+    "cache-compress": "true",
+    "env": "os,customer",
+    "labels": "somelabel",
+    "max-file": "5",
+    "max-size": "10m"
+  },
+  "max-concurrent-downloads": 3,
+  "max-concurrent-uploads": 5,
+  "max-download-attempts": 5,
+  "mtu": 0,
+  "no-new-privileges": false,
+  "node-generic-resources": [
+    "NVIDIA-GPU=UUID1",
+    "NVIDIA-GPU=UUID2"
+  ],
+  "oom-score-adjust": -500,
+  "pidfile": "",
+  "raw-logs": false,
+  "registry-mirrors": [],
+  "runtimes": {
+    "cc-runtime": {
+      "path": "/usr/bin/cc-runtime"
+    },
+    "custom": {
+      "path": "/usr/local/bin/my-runc-replacement",
+      "runtimeArgs": [
+        "--debug"
+      ]
+    }
+  },
+  "seccomp-profile": "",
+  "selinux-enabled": false,
+  "shutdown-timeout": 15,
+  "storage-driver": "",
+  "storage-opts": [],
+  "swarm-default-advertise-addr": "",
+  "tls": true,
+  "tlscacert": "",
+  "tlscert": "",
+  "tlskey": "",
+  "tlsverify": true,
+  "userland-proxy": false,
+  "userland-proxy-path": "/usr/libexec/docker-proxy",
+  "userns-remap": ""
+}
+

Note:

You cannot set options in daemon.json that have already been set on daemon startup as a flag. On systems that use systemd to start the Docker daemon, -H is already set, so you cannot use the hosts key in daemon.json to add listening addresses. See “custom Docker daemon options” for how to accomplish this task with a systemd drop-in file.
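A commonly used sketch of such a drop-in file (the path, dockerd location, and listening addresses below are assumptions based on standard systemd setups, not taken from this page):

# /etc/systemd/system/docker.service.d/override.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H unix:// -H tcp://127.0.0.1:2375

After creating the file, reload systemd (systemctl daemon-reload) and restart the docker service for the override to take effect.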

On Windows

The default location of the configuration file on Windows is %programdata%\docker\config\daemon.json. The --config-file flag can be used to specify a non-default location.

This is a full example of the allowed configuration options on Windows:

{
+  "allow-nondistributable-artifacts": [],
+  "authorization-plugins": [],
+  "bridge": "",
+  "cluster-advertise": "",
+  "cluster-store": "",
+  "containerd": "\\\\.\\pipe\\containerd-containerd",
+  "containerd-namespace": "docker",
+  "containerd-plugin-namespace": "docker-plugins",
+  "data-root": "",
+  "debug": true,
+  "default-ulimits": {},
+  "dns": [],
+  "dns-opts": [],
+  "dns-search": [],
+  "exec-opts": [],
+  "experimental": false,
+  "features": {},
+  "fixed-cidr": "",
+  "group": "",
+  "hosts": [],
+  "insecure-registries": [],
+  "labels": [],
+  "log-driver": "",
+  "log-level": "",
+  "max-concurrent-downloads": 3,
+  "max-concurrent-uploads": 5,
+  "max-download-attempts": 5,
+  "mtu": 0,
+  "pidfile": "",
+  "raw-logs": false,
+  "registry-mirrors": [],
+  "shutdown-timeout": 15,
+  "storage-driver": "",
+  "storage-opts": [],
+  "swarm-default-advertise-addr": "",
+  "tlscacert": "",
+  "tlscert": "",
+  "tlskey": "",
+  "tlsverify": true
+}
+

Feature options

The optional field features in daemon.json allows users to enable or disable specific daemon features. For example, {"features":{"buildkit": true}} enables buildkit as the default docker image builder.

The list of currently supported feature options:

Configuration reload behavior

Some options can be reconfigured while the daemon is running, without restarting the process. On Linux, the SIGHUP signal is used to reload; on Windows, a global event with the key Global\docker-daemon-config-$PID. The options can be modified in the configuration file, but conflicts with the provided flags are still checked for. The daemon fails to reconfigure itself if there are conflicts, but it won’t stop execution.
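On Linux, a reload can typically be triggered with a command along these lines (assumes a single dockerd process):

$ sudo kill -SIGHUP $(pidof dockerd)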

The list of currently supported options that can be reconfigured is this:

Updating and reloading cluster configurations such as --cluster-store, --cluster-advertise, and --cluster-store-opts takes effect only if these options were not previously configured. If --cluster-store has been provided as a flag but cluster-advertise has not, cluster-advertise can be added in the configuration file without being accompanied by --cluster-store. Configuration reload logs a warning message if it detects a change in previously configured cluster settings.

Run multiple daemons

Note:

Running multiple daemons on a single host is considered “experimental”. Be aware of unsolved problems: this setup may not work properly in some cases. Solutions are currently under development and will be delivered in the near future.

This section describes how to run multiple Docker daemons on a single host. To run multiple daemons, you must configure each daemon so that it does not conflict with other daemons on the same host. You can set these options either by providing them as flags, or by using a daemon configuration file.

The following daemon options must be configured for each daemon:

-b, --bridge=                          Attach containers to a network bridge
+--exec-root=/var/run/docker            Root of the Docker execdriver
+--data-root=/var/lib/docker            Root of persisted Docker data
+-p, --pidfile=/var/run/docker.pid      Path to use for daemon PID file
+-H, --host=[]                          Daemon socket(s) to connect to
+--iptables=true                        Enable addition of iptables rules
+--config-file=/etc/docker/daemon.json  Daemon configuration file
+--tlscacert="~/.docker/ca.pem"         Trust certs signed only by this CA
+--tlscert="~/.docker/cert.pem"         Path to TLS certificate file
+--tlskey="~/.docker/key.pem"           Path to TLS key file
+

When your daemons use different values for these flags, you can run them on the same host without any problems. It is very important to properly understand the meaning of those options and to use them correctly.

Example script for a separate “bootstrap” instance of the Docker daemon without network:

$ sudo dockerd \
+        -H unix:///var/run/docker-bootstrap.sock \
+        -p /var/run/docker-bootstrap.pid \
+        --iptables=false \
+        --ip-masq=false \
+        --bridge=none \
+        --data-root=/var/lib/docker-bootstrap \
+        --exec-root=/var/run/docker-bootstrap
+
+


+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/dockerd/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fevents%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fevents%2Findex.html new file mode 100644 index 00000000..c9aa36c1 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fevents%2Findex.html @@ -0,0 +1,171 @@ +

docker events


Get real time events from the server

Usage

$ docker events [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Use docker events to get real-time events from the server. These events differ per Docker object type. Different event types have different scopes. Local scoped events are only seen on the node they take place on, and swarm scoped events are seen on all managers.

Only the last 1000 log events are returned. You can use filters to further limit the number of events returned.

Object types

Containers

Docker containers report the following events:

Images

Docker images report the following events:

Plugins

Docker plugins report the following events:

Volumes

Docker volumes report the following events:

Networks

Docker networks report the following events:

Daemons

Docker daemons report the following events:

Services

Docker services report the following events:

Nodes

Docker nodes report the following events:

Secrets

Docker secrets report the following events:

Configs

Docker configs report the following events:

Limiting, filtering, and formatting the output

Limit events by time

The --since and --until parameters can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative to the client machine’s time. If you do not provide the --since option, the command returns only new and/or live events. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the client will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long.

Only the last 1000 log events are returned. You can use filters to further limit the number of events returned.

Filtering

The filtering flag (-f or --filter) format is key=value. If you would like to use multiple filters, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

Using the same filter multiple times is handled as an OR; for example, --filter container=588a23dac085 --filter container=a8f7720b8c22 displays events for container 588a23dac085 OR container a8f7720b8c22.

Using multiple filters is handled as an AND; for example, --filter container=588a23dac085 --filter event=start displays events for container 588a23dac085 where the event type is start.

The currently supported filters are:

Format

If a format (--format) is specified, the given template will be executed instead of the default format. Go’s text/template package describes all the details of the format.

If a format is set to {{json .}}, the events are streamed as valid JSON Lines. For information about JSON Lines, please refer to https://jsonlines.org/.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter , -f Filter output based on conditions provided
--format Format the output using the given Go template
--since Show all events created since timestamp
--until Stream events until this timestamp

Examples

Basic example

You’ll need two shells for this example.

Shell 1: Listening for events:

$ docker events
+

Shell 2: Start and Stop containers:

$ docker create --name test alpine:latest top
+$ docker start test
+$ docker stop test
+

Shell 1: (again, now showing events):

2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+

To exit the docker events command, use CTRL+C.

Filter events by time

You can filter the output by an absolute timestamp or relative time on the host machine, using the following different time syntaxes:

$ docker events --since 1483283804
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --since '2017-01-05'
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --since '2013-09-03T15:49:29'
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --since '10m'
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --since '2017-01-05T00:35:30' --until '2017-01-05T00:36:05'
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+

Filter events by criteria

The following commands show several different ways to filter the docker event output.

$ docker events --filter 'event=stop'
+
+2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)
+
+$ docker events --filter 'image=alpine'
+
+2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
+2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
+2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
+2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)
+
+$ docker events --filter 'container=test'
+
+2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8'
+
+2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
+2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
+2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --filter 'container=test' --filter 'event=stop'
+
+2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)
+
+$ docker events --filter 'type=volume'
+
+2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
+2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate)
+2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local)
+2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
+
+$ docker events --filter 'type=network'
+
+2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge)
+2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge)
+
+$ docker events --filter 'container=container_1' --filter 'container=container_2'
+
+2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
+2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+
+$ docker events --filter 'type=volume'
+
+2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
+2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate)
+2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local)
+2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
+
+$ docker events --filter 'type=network'
+
+2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge)
+2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge)
+
+$ docker events --filter 'type=plugin'
+
+2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+
+$ docker events -f type=service
+
+2017-07-12T06:34:07.999446625Z service create wj64st89fzgchxnhiqpn8p4oj (name=reverent_albattani)
+2017-07-12T06:34:21.405496207Z service remove wj64st89fzgchxnhiqpn8p4oj (name=reverent_albattani)
+
+$ docker events -f type=node
+
+2017-07-12T06:21:51.951586759Z node update 3xyz5ttp1a253q74z1thwywk9 (name=ip-172-31-23-42, state.new=ready, state.old=unknown)
+
+$ docker events -f type=secret
+
+2017-07-12T06:32:13.915704367Z secret create s8o6tmlnndrgzbmdilyy5ymju (name=new_secret)
+2017-07-12T06:32:37.052647783Z secret remove s8o6tmlnndrgzbmdilyy5ymju (name=new_secret)
+
+$  docker events -f type=config
+2017-07-12T06:44:13.349037127Z config create u96zlvzdfsyb9sg4mhyxfh3rl (name=abc)
+2017-07-12T06:44:36.327694184Z config remove u96zlvzdfsyb9sg4mhyxfh3rl (name=abc)
+
+$ docker events --filter 'scope=swarm'
+
+2017-07-10T07:46:50.250024503Z service create m8qcxu8081woyof7w3jaax6gk (name=affectionate_wilson)
+2017-07-10T07:47:31.093797134Z secret create 6g5pufzsv438p9tbvl9j94od4 (name=new_secret)
+

Format the output

$ docker events --filter 'type=container' --format 'Type={{.Type}}  Status={{.Status}}  ID={{.ID}}'
+
+Type=container  Status=create  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=attach  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=start  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=resize  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=die  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=destroy  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+

Format as JSON

$ docker events --format '{{json .}}'
+
+{"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+{"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+{"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e..
+{"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42..
+{"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/events/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fexec%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fexec%2Findex.html new file mode 100644 index 00000000..9e9de22f --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fexec%2Findex.html @@ -0,0 +1,42 @@ +

docker exec


Run a command in a running container

Usage

$ docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker exec command runs a new command in a running container.

The command started using docker exec only runs while the container’s primary process (PID 1) is running, and it is not restarted if the container is restarted.

COMMAND will run in the default directory of the container. If the underlying image has a custom directory specified with the WORKDIR directive in its Dockerfile, this will be used instead.

COMMAND should be an executable; a chained or a quoted command will not work. For example, docker exec -ti my_container "echo a && echo b" will not work, but docker exec -ti my_container sh -c "echo a && echo b" will.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--detach , -d Detached mode: run command in the background
--detach-keys Override the key sequence for detaching a container
--env , -e Set environment variables
--env-file Read in a file of environment variables
--interactive , -i Keep STDIN open even if not attached
--privileged Give extended privileges to the command
--tty , -t Allocate a pseudo-TTY
--user , -u Username or UID (format: <name|uid>[:<group|gid>])
--workdir , -w Working directory inside the container

Examples

Run docker exec on a running container

First, start a container.

$ docker run --name ubuntu_bash --rm -i -t ubuntu bash
+

This will create a container named ubuntu_bash and start a Bash session.

Next, execute a command on the container.

$ docker exec -d ubuntu_bash touch /tmp/execWorks
+

This will create a new file /tmp/execWorks inside the running container ubuntu_bash, in the background.

Next, execute an interactive bash shell on the container.

$ docker exec -it ubuntu_bash bash
+

This will create a new Bash session in the container ubuntu_bash.

Next, set an environment variable in the current bash session.

$ docker exec -it -e VAR=1 ubuntu_bash bash
+

This will create a new Bash session in the container ubuntu_bash with environment variable $VAR set to “1”. Note that this environment variable will only be valid on the current Bash session.

By default, the docker exec command runs in the same working directory set when the container was created.

$ docker exec -it ubuntu_bash pwd
+/
+

You can specify an alternative working directory for the command to execute in:

$ docker exec -it -w /root ubuntu_bash pwd
+/root
+

Try to run docker exec on a paused container

If the container is paused, then the docker exec command will fail with an error:

$ docker pause test
+
+test
+
+$ docker ps
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS                   PORTS               NAMES
+1ae3b36715d2        ubuntu:latest       "bash"              17 seconds ago      Up 16 seconds (Paused)                       test
+
+$ docker exec test ls
+
+FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec
+
+$ echo $?
+1
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/exec/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fexport%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fexport%2Findex.html new file mode 100644 index 00000000..39edbb15 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fexport%2Findex.html @@ -0,0 +1,11 @@ +

docker export


Export a container’s filesystem as a tar archive

Usage

$ docker export [OPTIONS] CONTAINER
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker export command does not export the contents of volumes associated with the container. If a volume is mounted on top of an existing directory in the container, docker export will export the contents of the underlying directory, not the contents of the volume.

Refer to Backup, restore, or migrate data volumes in the user guide for examples on exporting data in a volume.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--output , -o Write to a file, instead of STDOUT

Examples

Each of these commands has the same result.

$ docker export red_panda > latest.tar
+
$ docker export --output="latest.tar" red_panda
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/export/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fhistory%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fhistory%2Findex.html new file mode 100644 index 00000000..143f8012 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fhistory%2Findex.html @@ -0,0 +1,30 @@ +

docker history


Show the history of an image

Usage

$ docker history [OPTIONS] IMAGE
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format Pretty-print images using a Go template
--human , -H true Print sizes and dates in human readable format
--no-trunc Don't truncate output
--quiet , -q Only show image IDs

Examples

To see how the docker:latest image was built:

$ docker history docker
+
+IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+3e23a5875458        8 days ago          /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8            0 B
+8578938dd170        8 days ago          /bin/sh -c dpkg-reconfigure locales &&    loc   1.245 MB
+be51b77efb42        8 days ago          /bin/sh -c apt-get update && apt-get install    338.3 MB
+4b137612be55        6 weeks ago         /bin/sh -c #(nop) ADD jessie.tar.xz in /        121 MB
+750d58736b4b        6 weeks ago         /bin/sh -c #(nop) MAINTAINER Tianon Gravi <ad   0 B
+511136ea3c5a        9 months ago                                                        0 B                 Imported from -
+

To see how the docker:apache image was added to a container’s base image:

$ docker history docker:scm
+IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+2ac9d1098bf1        3 months ago        /bin/bash                                       241.4 MB            Added Apache to Fedora base image
+88b42ffd1f7c        5 months ago        /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7   373.7 MB
+c69cab00d6ef        5 months ago        /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar   0 B
+511136ea3c5a        19 months ago                                                       0 B                 Imported from -
+

Format the output

The formatting option (--format) pretty-prints history output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Image ID
.CreatedSince Elapsed time since the image was created if --human=true, otherwise timestamp of when image was created
.CreatedAt Timestamp of when image was created
.CreatedBy Command that was used to create the image
.Size Image disk size
.Comment Comment for image

When using the --format option, the history command will either output the data exactly as the template declares or, when using the table directive, will include column headers as well.

The following example uses a template without headers and outputs the ID and CreatedSince entries separated by a colon (:) for the busybox image:

$ docker history --format "{{.ID}}: {{.CreatedSince}}" busybox
+
+f6e427c148a7: 4 weeks ago
+<missing>: 4 weeks ago
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/history/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage%2Findex.html new file mode 100644 index 00000000..c7ca0434 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage%2Findex.html @@ -0,0 +1,7 @@ +

docker image


Manage images

Usage

$ docker image COMMAND
+

Description

Manage images.

Child commands

Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_build%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_build%2Findex.html new file mode 100644 index 00000000..a423f48c --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_build%2Findex.html @@ -0,0 +1,22 @@ +

docker image build


Build an image from a Dockerfile

Usage

$ docker image build [OPTIONS] PATH | URL | -
+

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
--build-arg Set build-time variables
--cache-from Images to consider as cache sources
--cgroup-parent Optional parent cgroup for the container
--compress Compress the build context using gzip
--cpu-period Limit the CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit the CPU CFS (Completely Fair Scheduler) quota
--cpu-shares , -c CPU shares (relative weight)
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--disable-content-trust true Skip image verification
--file , -f Name of the Dockerfile (Default is 'PATH/Dockerfile')
--force-rm Always remove intermediate containers
--iidfile Write the image ID to the file
--isolation Container isolation technology
--label Set metadata for an image
--memory , -m Memory limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--network Set the networking mode for the RUN instructions during build
--no-cache Do not use cache when building the image
--output , -o API 1.40+ Output destination (format: type=local,dest=path)
--platform API 1.40+ Set platform if server is multi-platform capable
--progress auto Set type of progress output (auto, plain, tty). Use plain to show container output
--pull Always attempt to pull a newer version of the image
--quiet , -q Suppress the build output and print image ID on success
--rm true Remove intermediate containers after a successful build
--secret Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret
--security-opt Security options
--shm-size Size of /dev/shm
--squash experimental (daemon) Squash newly built layers into a single new layer
--ssh SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])
--stream Stream attaches to server to negotiate build context
--tag , -t Name and optionally a tag in the 'name:tag' format
--target Set the target build stage to build.
--ulimit Ulimit options

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_build/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_history%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_history%2Findex.html new file mode 100644 index 00000000..afc02e45 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_history%2Findex.html @@ -0,0 +1,11 @@ +

docker image history


Show the history of an image

Usage

$ docker image history [OPTIONS] IMAGE
+

Options

Name, shorthand Default Description
--format Pretty-print images using a Go template
--human , -H true Print sizes and dates in human readable format
--no-trunc Don't truncate output
--quiet , -q Only show image IDs

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_history/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_import%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_import%2Findex.html new file mode 100644 index 00000000..70b4fdc7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_import%2Findex.html @@ -0,0 +1,11 @@ +

docker image import


Import the contents from a tarball to create a filesystem image

Usage

$ docker image import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
+

Options

Name, shorthand Default Description
--change , -c Apply Dockerfile instruction to the created image
--message , -m Set commit message for imported image
--platform Set platform if server is multi-platform capable

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_import/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_inspect%2Findex.html new file mode 100644 index 00000000..3c810158 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_inspect%2Findex.html @@ -0,0 +1,9 @@ +

docker image inspect


Display detailed information on one or more images

Usage

$ docker image inspect [OPTIONS] IMAGE [IMAGE...]
+

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_load%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_load%2Findex.html new file mode 100644 index 00000000..000408b7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_load%2Findex.html @@ -0,0 +1,11 @@ +

docker image load


Load an image from a tar archive or STDIN

Usage

$ docker image load [OPTIONS]
+

Options

Name, shorthand Default Description
--input , -i Read from tar archive file, instead of STDIN
--quiet , -q Suppress the load output

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_load/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_ls%2Findex.html new file mode 100644 index 00000000..10114c1b --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_ls%2Findex.html @@ -0,0 +1,13 @@ +

docker image ls


List images

Usage

$ docker image ls [OPTIONS] [REPOSITORY[:TAG]]
+

Options

Name, shorthand Default Description
--all , -a Show all images (default hides intermediate images)
--digests Show digests
--filter , -f Filter output based on conditions provided
--format Pretty-print images using a Go template
--no-trunc Don't truncate output
--quiet , -q Only show image IDs

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_prune%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_prune%2Findex.html new file mode 100644 index 00000000..d321382d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_prune%2Findex.html @@ -0,0 +1,102 @@ +

docker image prune


Remove unused images

Usage

$ docker image prune [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Remove all dangling images. If -a is specified, all images not referenced by any container are also removed.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--all , -a Remove all unused images, not just dangling ones
--filter Provide filter values (e.g. 'until=<timestamp>')
--force , -f Do not prompt for confirmation

Examples

Example output:

$ docker image prune -a
+
+WARNING! This will remove all images without at least one container associated to them.
+Are you sure you want to continue? [y/N] y
+Deleted Images:
+untagged: alpine:latest
+untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a
+deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba
+deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f
+untagged: alpine:3.3
+untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423
+untagged: my-jq:latest
+deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff
+deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65
+deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7
+untagged: my-curl:latest
+deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e
+deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9
+deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e
+deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec
+deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06
+deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c
+deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35
+deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809
+deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0
+deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac
+deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b
+deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1
+
+Total reclaimed space: 16.43 MB
+

Filtering

The filtering flag (--filter) uses a “key=value” format. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are until and label:

The until filter can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative to the daemon machine’s time. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the daemon will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long.

The label filter accepts two formats. One is the label=... (label=<key> or label=<key>=<value>), which removes images with the specified labels. The other format is the label!=... (label!=<key> or label!=<key>=<value>), which removes images without the specified labels.

Predicting what will be removed

If you are using positive filtering (testing for the existence of a label or that a label has a specific value), you can use docker image ls with the same filtering syntax to see which images match your filter.
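
For instance, a quick sketch (assuming some local images carry a deprecated label): the same positive filter can be previewed with docker image ls before pruning.

$ docker image ls --filter "label=deprecated"
$ docker image prune --filter "label=deprecated"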

However, if you are using negative filtering (testing for the absence of a label or that a label does not have a specific value), this type of filter does not work with docker image ls so you cannot easily predict which images will be removed. In addition, the confirmation prompt for docker image prune always warns that all dangling images will be removed, even if you are using --filter.

The following removes images created before 2017-01-04T00:00:00:

$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}'
+REPOSITORY          TAG                 IMAGE ID            CREATED AT                      SIZE
+foo                 latest              2f287ac753da        2017-01-04 13:42:23 -0800 PST   3.98 MB
+alpine              latest              88e169ea8f46        2016-12-27 10:17:25 -0800 PST   3.98 MB
+busybox             latest              e02e811dd08f        2016-10-07 14:03:58 -0700 PDT   1.09 MB
+
+$ docker image prune -a --force --filter "until=2017-01-04T00:00:00"
+
+Deleted Images:
+untagged: alpine:latest
+untagged: alpine@sha256:dfbd4a3a8ebca874ebd2474f044a0b33600d4523d03b0df76e5c5986cb02d7e8
+untagged: busybox:latest
+untagged: busybox@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912
+deleted: sha256:e02e811dd08fd49e7f6032625495118e63f597eb150403d02e3238af1df240ba
+deleted: sha256:e88b3f82283bc59d5e0df427c824e9f95557e661fcb0ea15fb0fb6f97760f9d9
+
+Total reclaimed space: 1.093 MB
+
+$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}'
+
+REPOSITORY          TAG                 IMAGE ID            CREATED AT                      SIZE
+foo                 latest              2f287ac753da        2017-01-04 13:42:23 -0800 PST   3.98 MB
+

The following removes images created more than 10 days (240h) ago:

$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+foo                 latest              2f287ac753da        14 seconds ago      3.98 MB
+alpine              latest              88e169ea8f46        8 days ago          3.98 MB
+debian              jessie              7b0a06c805e8        2 months ago        123 MB
+busybox             latest              e02e811dd08f        2 months ago        1.09 MB
+golang              1.7.0               138c2e655421        4 months ago        670 MB
+
+$ docker image prune -a --force --filter "until=240h"
+
+Deleted Images:
+untagged: golang:1.7.0
+untagged: golang@sha256:6765038c2b8f407fd6e3ecea043b44580c229ccfa2a13f6d85866cf2b4a9628e
+deleted: sha256:138c2e6554219de65614d88c15521bfb2da674cbb0bf840de161f89ff4264b96
+deleted: sha256:ec353c2e1a673f456c4b78906d0d77f9d9456cfb5229b78c6a960bfb7496b76a
+deleted: sha256:fe22765feaf3907526b4921c73ea6643ff9e334497c9b7e177972cf22f68ee93
+deleted: sha256:ff845959c80148421a5c3ae11cc0e6c115f950c89bc949646be55ed18d6a2912
+deleted: sha256:a4320831346648c03db64149eafc83092e2b34ab50ca6e8c13112388f25899a7
+deleted: sha256:4c76020202ee1d9709e703b7c6de367b325139e74eebd6b55b30a63c196abaf3
+deleted: sha256:d7afd92fb07236c8a2045715a86b7d5f0066cef025018cd3ca9a45498c51d1d6
+deleted: sha256:9e63c5bce4585dd7038d830a1f1f4e44cb1a1515b00e620ac718e934b484c938
+untagged: debian:jessie
+untagged: debian@sha256:c1af755d300d0c65bb1194d24bce561d70c98a54fb5ce5b1693beb4f7988272f
+deleted: sha256:7b0a06c805e8f23807fb8856621c60851727e85c7bcb751012c813f122734c8d
+deleted: sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8
+
+Total reclaimed space: 792.6 MB
+
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+foo                 latest              2f287ac753da        About a minute ago   3.98 MB
+alpine              latest              88e169ea8f46        8 days ago           3.98 MB
+busybox             latest              e02e811dd08f        2 months ago         1.09 MB
+

The following example removes images with the label deprecated:

$ docker image prune --filter="label=deprecated"
+

The following example removes images with the label maintainer set to john:

$ docker image prune --filter="label=maintainer=john"
+

This example removes images which have no maintainer label:

$ docker image prune --filter="label!=maintainer"
+

This example removes images which have a maintainer label not set to john:

$ docker image prune --filter="label!=maintainer=john"
+

Note

You are prompted for confirmation before the prune removes anything, but you are not shown a list of what will potentially be removed. In addition, docker image ls does not support negative filtering, so it is difficult to predict which images will actually be removed.

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_prune/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_pull%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_pull%2Findex.html new file mode 100644 index 00000000..ed66a3cd --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_pull%2Findex.html @@ -0,0 +1,11 @@ +

docker image pull


Pull an image or a repository from a registry

Usage

$ docker image pull [OPTIONS] NAME[:TAG|@DIGEST]
+

Options

Name, shorthand Default Description
--all-tags , -a Download all tagged images in the repository
--disable-content-trust true Skip image verification
--platform Set platform if server is multi-platform capable
--quiet , -q Suppress verbose output

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_pull/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_push%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_push%2Findex.html new file mode 100644 index 00000000..7a0e18ca --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_push%2Findex.html @@ -0,0 +1,11 @@ +

docker image push


Push an image or a repository to a registry

Usage

$ docker image push [OPTIONS] NAME[:TAG]
+

Options

Name, shorthand Default Description
--all-tags , -a Push all tagged images in the repository
--disable-content-trust true Skip image signing
--quiet , -q Suppress verbose output

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_push/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_rm%2Findex.html new file mode 100644 index 00000000..6e96ddc7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_rm%2Findex.html @@ -0,0 +1,9 @@ +

docker image rm


Remove one or more images

Usage

$ docker image rm [OPTIONS] IMAGE [IMAGE...]
+

Options

Name, shorthand Default Description
--force , -f Force removal of the image
--no-prune Do not delete untagged parents

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_save%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_save%2Findex.html new file mode 100644 index 00000000..06e16c07 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_save%2Findex.html @@ -0,0 +1,9 @@ +

docker image save


Save one or more images to a tar archive (streamed to STDOUT by default)

Usage

$ docker image save [OPTIONS] IMAGE [IMAGE...]
+

Options

Name, shorthand Default Description
--output , -o Write to a file, instead of STDOUT

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_save/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_tag%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_tag%2Findex.html new file mode 100644 index 00000000..80efa30d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimage_tag%2Findex.html @@ -0,0 +1,7 @@ +

docker image tag


Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE

Usage

$ docker image tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]
+

Parent command

Command Description
docker image Manage images
Command Description
docker image build Build an image from a Dockerfile
docker image history Show the history of an image
docker image import Import the contents from a tarball to create a filesystem image
docker image inspect Display detailed information on one or more images
docker image load Load an image from a tar archive or STDIN
docker image ls List images
docker image prune Remove unused images
docker image pull Pull an image or a repository from a registry
docker image push Push an image or a repository to a registry
docker image rm Remove one or more images
docker image save Save one or more images to a tar archive (streamed to STDOUT by default)
docker image tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/image_tag/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimages%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimages%2Findex.html new file mode 100644 index 00000000..018d1bd2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimages%2Findex.html @@ -0,0 +1,136 @@ +

docker images


List images

Usage

$ docker images [OPTIONS] [REPOSITORY[:TAG]]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The default docker images output shows all top-level images, their repository and tags, and their size.

Docker images have intermediate layers that increase reusability, decrease disk usage, and speed up docker build by allowing each step to be cached. These intermediate layers are not shown by default.

The SIZE is the cumulative space taken up by the image and all its parent images. This is also the disk space used by the contents of the Tar file created when you docker save an image.
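
As a rough check of that claim (a sketch; exact byte counts vary and include some tar metadata), you can compare the SIZE column against the size of the docker save stream, for instance with the postgres:latest image shown in the examples below:

$ docker save postgres:latest | wc -c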

An image will be listed more than once if it has multiple repository names or tags. This single image (identifiable by its matching IMAGE ID) uses up the SIZE listed only once.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--all , -a Show all images (default hides intermediate images)
--digests Show digests
--filter , -f Filter output based on conditions provided
--format Pretty-print images using a Go template
--no-trunc Don't truncate output
--quiet , -q Only show image IDs

Examples

List the most recently created images

$ docker images
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+<none>                    <none>              77af4d6b9913        19 hours ago        1.089 GB
+committ                   latest              b6fa739cedf5        19 hours ago        1.089 GB
+<none>                    <none>              78a85c484f71        19 hours ago        1.089 GB
+docker                    latest              30557a29d5ab        20 hours ago        1.089 GB
+<none>                    <none>              5ed6274db6ce        24 hours ago        1.089 GB
+postgres                  9                   746b819f315e        4 days ago          213.4 MB
+postgres                  9.3                 746b819f315e        4 days ago          213.4 MB
+postgres                  9.3.5               746b819f315e        4 days ago          213.4 MB
+postgres                  latest              746b819f315e        4 days ago          213.4 MB
+

List images by name and tag

The docker images command takes an optional [REPOSITORY[:TAG]] argument that restricts the list to images that match the argument. If you specify REPOSITORY but no TAG, the docker images command lists all images in the given repository.

For example, to list all images in the “java” repository, run this command:

$ docker images java
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+java                8                   308e519aac60        6 days ago          824.5 MB
+java                7                   493d82594c15        3 months ago        656.3 MB
+java                latest              2711b1d6f3aa        5 months ago        603.9 MB
+

The [REPOSITORY[:TAG]] value must be an “exact match”. This means that, for example, docker images jav does not match the image java.
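
To match by pattern instead of by exact name, you can use the reference filter described below (a short sketch):

$ docker images --filter=reference='java*'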

If both REPOSITORY and TAG are provided, only images matching that repository and tag are listed. To find all local images in the “java” repository with tag “8” you can use:

$ docker images java:8
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+java                8                   308e519aac60        6 days ago          824.5 MB
+

If nothing matches REPOSITORY[:TAG], the list is empty.

$ docker images java:0
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+

List the full length image IDs

$ docker images --no-trunc
+
+REPOSITORY                    TAG                 IMAGE ID                                                                  CREATED             SIZE
+<none>                        <none>              sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        1.089 GB
+committest                    latest              sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        1.089 GB
+<none>                        <none>              sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921   19 hours ago        1.089 GB
+docker                        latest              sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        1.089 GB
+<none>                        <none>              sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5   20 hours ago        1.089 GB
+<none>                        <none>              sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b   22 hours ago        1.082 GB
+<none>                        <none>              sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a   23 hours ago        1.089 GB
+tryout                        latest              sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074   23 hours ago        131.5 MB
+<none>                        <none>              sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df   24 hours ago        1.089 GB
+

List image digests

Images that use the v2 or later format have a content-addressable identifier called a digest. As long as the input used to generate the image is unchanged, the digest value is predictable. To list image digest values, use the --digests flag:

$ docker images --digests
+REPOSITORY                         TAG                 DIGEST                                                                    IMAGE ID            CREATED             SIZE
+localhost:5000/test/busybox        <none>              sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536        9 weeks ago         2.43 MB
+

When pushing or pulling to a 2.0 registry, the push or pull command output includes the image digest. You can pull using a digest value. You can also reference by digest in create, run, and rmi commands, as well as the FROM image reference in a Dockerfile.
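
For example (a sketch reusing the digest from the sample listing above), pulling by digest looks like this, and the same NAME@DIGEST form can be used in a Dockerfile FROM line:

$ docker pull localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf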

Filtering

The filtering flag (-f or --filter) uses a “key=value” format. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are dangling, label, before, since, and reference:

Show untagged images (dangling)

$ docker images --filter "dangling=true"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+<none>              <none>              8abc22fbb042        4 weeks ago         0 B
+<none>              <none>              48e5f45168b9        4 weeks ago         2.489 MB
+<none>              <none>              bf747efa0e2f        4 weeks ago         0 B
+<none>              <none>              980fe10e5736        12 weeks ago        101.4 MB
+<none>              <none>              dea752e4e117        12 weeks ago        101.4 MB
+<none>              <none>              511136ea3c5a        8 months ago        0 B
+

This will display untagged images that are the leaves of the images tree (not intermediary layers). These images occur when a new build of an image takes the repo:tag away from the image ID, leaving it as <none>:<none> or untagged. A warning is issued if you try to remove an image that a container is currently using. Having this flag allows for batch cleanup.

You can use this in conjunction with docker rmi ...:

$ docker rmi $(docker images -f "dangling=true" -q)
+
+8abc22fbb042
+48e5f45168b9
+bf747efa0e2f
+980fe10e5736
+dea752e4e117
+511136ea3c5a
+

Docker warns you if any containers exist that are using these untagged images.

Show images with a given label

The label filter matches images based on the presence of a label alone or a label and a value.

The following filter matches images with the com.example.version label regardless of its value.

$ docker images --filter "label=com.example.version"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+match-me-1          latest              eeae25ada2aa        About a minute ago   188.3 MB
+match-me-2          latest              dea752e4e117        About a minute ago   188.3 MB
+

The following filter matches images with the com.example.version label with the 1.0 value.

$ docker images --filter "label=com.example.version=1.0"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+match-me            latest              511136ea3c5a        About a minute ago   188.3 MB
+

In this example, filtering on the 0.1 value returns an empty set because no matches were found.

$ docker images --filter "label=com.example.version=0.1"
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+

Filter images by time

The before filter shows only images created before the image with the given ID or reference. For example, having these images:

$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+image1              latest              eeae25ada2aa        4 minutes ago        188.3 MB
+image2              latest              dea752e4e117        9 minutes ago        188.3 MB
+image3              latest              511136ea3c5a        25 minutes ago       188.3 MB
+

Filtering with before would give:

$ docker images --filter "before=image1"
+
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+image2              latest              dea752e4e117        9 minutes ago        188.3 MB
+image3              latest              511136ea3c5a        25 minutes ago       188.3 MB
+

Filtering with since would give:

$ docker images --filter "since=image3"
+REPOSITORY          TAG                 IMAGE ID            CREATED              SIZE
+image1              latest              eeae25ada2aa        4 minutes ago        188.3 MB
+image2              latest              dea752e4e117        9 minutes ago        188.3 MB
+

Filter images by reference

The reference filter shows only images whose reference matches the specified pattern.

$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             uclibc              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             musl                733eb3059dce        5 weeks ago         1.21 MB
+busybox             glibc               21c16b6787c6        5 weeks ago         4.19 MB
+

Filtering with reference would give:

$ docker images --filter=reference='busy*:*libc'
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             uclibc              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             glibc               21c16b6787c6        5 weeks ago         4.19 MB
+

Filtering with multiple reference patterns would give images that match either pattern:

$ docker images --filter=reference='busy*:uclibc' --filter=reference='busy*:glibc'
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             uclibc              e02e811dd08f        5 weeks ago         1.09 MB
+busybox             glibc               21c16b6787c6        5 weeks ago         4.19 MB
+

Format the output

The formatting option (--format) pretty-prints the image listing using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Image ID
.Repository Image repository
.Tag Image tag
.Digest Image digest
.CreatedSince Elapsed time since the image was created
.CreatedAt Time when the image was created
.Size Image disk size

When using the --format option, the image command will either output the data exactly as the template declares or, when using the table directive, will include column headers as well.

The following example uses a template without headers and outputs the ID and Repository entries separated by a colon (:) for all images:

$ docker images --format "{{.ID}}: {{.Repository}}"
+
+77af4d6b9913: <none>
+b6fa739cedf5: committ
+78a85c484f71: <none>
+30557a29d5ab: docker
+5ed6274db6ce: <none>
+746b819f315e: postgres
+746b819f315e: postgres
+746b819f315e: postgres
+746b819f315e: postgres
+

To list all images with their repository and tag in a table format you can use:

$ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}"
+
+IMAGE ID            REPOSITORY                TAG
+77af4d6b9913        <none>                    <none>
+b6fa739cedf5        committ                   latest
+78a85c484f71        <none>                    <none>
+30557a29d5ab        docker                    latest
+5ed6274db6ce        <none>                    <none>
+746b819f315e        postgres                  9
+746b819f315e        postgres                  9.3
+746b819f315e        postgres                  9.3.5
+746b819f315e        postgres                  latest
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/images/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fimport%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimport%2Findex.html new file mode 100644 index 00000000..477eb5c3 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fimport%2Findex.html @@ -0,0 +1,17 @@ +

docker import


Import the contents from a tarball to create a filesystem image

Usage

$ docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

You can specify a URL or - (dash) to take data directly from STDIN. The URL can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a filesystem or to an individual file on the Docker host. If you specify an archive, Docker untars it in the container relative to the / (root). If you specify an individual file, you must specify the full path within the host. To import from a remote location, specify a URI that begins with the http:// or https:// protocol.

The --change option applies Dockerfile instructions to the image that is created. Supported Dockerfile instructions: CMD|ENTRYPOINT|ENV|EXPOSE|ONBUILD|USER|VOLUME|WORKDIR

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--change , -c Apply Dockerfile instruction to the created image
--message , -m Set commit message for imported image
--platform Set platform if server is multi-platform capable

Examples

Import from a remote location

This creates a new untagged image.

$ docker import https://example.com/exampleimage.tgz
+

Import from a local file

Import to docker via pipe and STDIN.

$ cat exampleimage.tgz | docker import - exampleimagelocal:new
+

Import with a commit message.

$ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new
+

Import to docker from a local archive.

$ docker import /path/to/exampleimage.tgz
+

Import from a local directory

$ sudo tar -c . | docker import - exampleimagedir
+

Import from a local directory with new configurations

$ sudo tar -c . | docker import --change "ENV DEBUG=true" - exampleimagedir
+
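
Several --change flags can be combined in a single import (a sketch, assuming the same local directory as above; the CMD value here is only an illustration):

$ sudo tar -c . | docker import --change "ENV DEBUG=true" --change 'CMD ["/bin/bash"]' - exampleimagedir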

Note the sudo in this example: you must preserve the ownership of the files (especially root ownership) during the archiving with tar. If you do not run tar as root (or via sudo), the ownership might not be preserved.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/import/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Finfo%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Finfo%2Findex.html new file mode 100644 index 00000000..684cda55 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Finfo%2Findex.html @@ -0,0 +1,186 @@ +

docker info


Display system-wide information

Usage

$ docker info [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

This command displays system-wide information about the Docker installation, including the kernel version and the number of containers and images. The number of images shown is the number of unique images; the same image tagged under different names is counted only once.

If a format is specified, the given template will be executed instead of the default format. Go’s text/template package describes all the details of the format.

Depending on the storage driver in use, additional information can be shown, such as pool name, data file, metadata file, data space used, total data space, metadata space used, and total metadata space.

The data file is where the images are stored and the metadata file is where the metadata regarding those images is stored. When run for the first time, Docker allocates a certain amount of data space and metadata space from the space available on the volume where /var/lib/docker is mounted.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template

Examples

Show output

The example below shows the output for a daemon running on Red Hat Enterprise Linux, using the devicemapper storage driver. As can be seen in the output, additional information about the devicemapper storage driver is shown:

$ docker info
+
+Client:
+ Context:    default
+ Debug Mode: false
+
+Server:
+ Containers: 14
+  Running: 3
+  Paused: 1
+  Stopped: 10
+ Images: 52
+ Server Version: 1.10.3
+ Storage Driver: devicemapper
+  Pool Name: docker-202:2-25583803-pool
+  Pool Blocksize: 65.54 kB
+  Base Device Size: 10.74 GB
+  Backing Filesystem: xfs
+  Data file: /dev/loop0
+  Metadata file: /dev/loop1
+  Data Space Used: 1.68 GB
+  Data Space Total: 107.4 GB
+  Data Space Available: 7.548 GB
+  Metadata Space Used: 2.322 MB
+  Metadata Space Total: 2.147 GB
+  Metadata Space Available: 2.145 GB
+  Udev Sync Supported: true
+  Deferred Removal Enabled: false
+  Deferred Deletion Enabled: false
+  Deferred Deleted Device Count: 0
+  Data loop file: /var/lib/docker/devicemapper/devicemapper/data
+  Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata
+  Library Version: 1.02.107-RHEL7 (2015-12-01)
+ Execution Driver: native-0.2
+ Logging Driver: json-file
+ Plugins:
+  Volume: local
+  Network: null host bridge
+ Kernel Version: 3.10.0-327.el7.x86_64
+ Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo)
+ OSType: linux
+ Architecture: x86_64
+ CPUs: 1
+ Total Memory: 991.7 MiB
+ Name: ip-172-30-0-91.ec2.internal
+ ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S
+ Docker Root Dir: /var/lib/docker
+ Debug Mode: false
+ Username: gordontheturtle
+ Registry: https://index.docker.io/v1/
+ Insecure registries:
+  myinsecurehost:5000
+  127.0.0.0/8
+

Show debugging output

Here is a sample output for a daemon running on Ubuntu, using the overlay2 storage driver and a node that is part of a 2-node swarm:

$ docker --debug info
+
+Client:
+ Context:    default
+ Debug Mode: true
+
+Server:
+ Containers: 14
+  Running: 3
+  Paused: 1
+  Stopped: 10
+ Images: 52
+ Server Version: 1.13.0
+ Storage Driver: overlay2
+  Backing Filesystem: extfs
+  Supports d_type: true
+  Native Overlay Diff: false
+ Logging Driver: json-file
+ Cgroup Driver: cgroupfs
+ Plugins:
+  Volume: local
+  Network: bridge host macvlan null overlay
+ Swarm: active
+  NodeID: rdjq45w1op418waxlairloqbm
+  Is Manager: true
+  ClusterID: te8kdyw33n36fqiz74bfjeixd
+  Managers: 1
+  Nodes: 2
+  Orchestration:
+   Task History Retention Limit: 5
+  Raft:
+   Snapshot Interval: 10000
+   Number of Old Snapshots to Retain: 0
+   Heartbeat Tick: 1
+   Election Tick: 3
+  Dispatcher:
+   Heartbeat Period: 5 seconds
+  CA Configuration:
+   Expiry Duration: 3 months
+  Root Rotation In Progress: false
+  Node Address: 172.16.66.128 172.16.66.129
+  Manager Addresses:
+   172.16.66.128:2477
+ Runtimes: runc
+ Default Runtime: runc
+ Init Binary: docker-init
+ containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531
+ runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2
+ init version: N/A (expected: v0.13.0)
+ Security Options:
+  apparmor
+  seccomp
+   Profile: default
+ Kernel Version: 4.4.0-31-generic
+ Operating System: Ubuntu 16.04.1 LTS
+ OSType: linux
+ Architecture: x86_64
+ CPUs: 2
+ Total Memory: 1.937 GiB
+ Name: ubuntu
+ ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326
+ Docker Root Dir: /var/lib/docker
+ Debug Mode: true
+  File Descriptors: 30
+  Goroutines: 123
+  System Time: 2016-11-12T17:24:37.955404361-08:00
+  EventsListeners: 0
+ Http Proxy: http://test:test@proxy.example.com:8080
+ Https Proxy: https://test:test@proxy.example.com:8080
+ No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com
+ Registry: https://index.docker.io/v1/
+ WARNING: No swap limit support
+ Labels:
+  storage=ssd
+  staging=true
+ Experimental: false
+ Insecure Registries:
+  127.0.0.0/8
+ Registry Mirrors:
+   http://192.168.1.2/
+   http://registry-mirror.example.com:5000/
+ Live Restore Enabled: false
+

The global -D option causes all docker commands to output debug information.

Format the output

You can also specify the output format:

$ docker info --format '{{json .}}'
+
+{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...}
+
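
A single field can be extracted in the same way (a sketch; .ServerVersion corresponds to the Server Version line in the output above):

$ docker info --format '{{.ServerVersion}}'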

Run docker info on Windows

Here is a sample output for a daemon running on Windows Server 2016:

E:\docker>docker info
+Client:
+ Context:    default
+ Debug Mode: false
+
+Server:
+ Containers: 1
+  Running: 0
+  Paused: 0
+  Stopped: 1
+ Images: 17
+ Server Version: 1.13.0
+ Storage Driver: windowsfilter
+  Windows:
+ Logging Driver: json-file
+ Plugins:
+  Volume: local
+  Network: nat null overlay
+ Swarm: inactive
+ Default Isolation: process
+ Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937)
+ Operating System: Windows Server 2016 Datacenter
+ OSType: windows
+ Architecture: x86_64
+ CPUs: 8
+ Total Memory: 3.999 GiB
+ Name: WIN-V0V70C0LU5P
+ ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62
+ Docker Root Dir: C:\control
+ Debug Mode: false
+ Registry: https://index.docker.io/v1/
+ Insecure Registries:
+  127.0.0.0/8
+ Registry Mirrors:
+   http://192.168.1.2/
+   http://registry-mirror.example.com:5000/
+ Live Restore Enabled: false
+

Warnings about kernel support

If your operating system does not enable certain capabilities, you may see warnings such as one of the following when you run docker info:

WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.
+
WARNING: No swap limit support
+

You can ignore these warnings unless you actually need the ability to limit these resources, in which case you should consult your operating system’s documentation for enabling them.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/info/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Finspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Finspect%2Findex.html new file mode 100644 index 00000000..c999cd8d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Finspect%2Findex.html @@ -0,0 +1,18 @@ +

docker inspect


Return low-level information on Docker objects

Usage

$ docker inspect [OPTIONS] NAME|ID [NAME|ID...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Docker inspect provides detailed information on constructs controlled by Docker.

By default, docker inspect will render results in a JSON array.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template
--size , -s Display total file sizes if the type is container
--type Return JSON for specified type

Examples

Get an instance’s IP address

For the most part, you can pick out any field from the JSON in a fairly straightforward manner.

$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID
+

Get an instance’s MAC address

$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID
+

Get an instance’s log path

$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
+

Get an instance’s image name

$ docker inspect --format='{{.Config.Image}}' $INSTANCE_ID
+

List all port bindings

You can loop over arrays and maps in the results to produce simple text output:

$ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+

Find a specific port mapping

The .Field syntax doesn’t work when the field name begins with a number, but the template language’s index function does. The .NetworkSettings.Ports section contains a map of the internal port mappings to a list of external address/port objects. To grab just the numeric public port, use index to find the specific port map, and then index 0 to take the first object inside it. Finally, request the HostPort field to get the public port.

$ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+

Get a subsection in JSON format

If you request a field which is itself a structure containing other fields, by default you get a Go-style dump of the inner values. Docker adds a template function, json, which can be applied to get results in JSON format.

$ docker inspect --format='{{json .Config}}' $INSTANCE_ID
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fkill%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fkill%2Findex.html new file mode 100644 index 00000000..4d96197d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fkill%2Findex.html @@ -0,0 +1,14 @@ +

docker kill


Kill one or more running containers

Usage

$ docker kill [OPTIONS] CONTAINER [CONTAINER...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker kill subcommand kills one or more containers. The main process inside the container is sent the SIGKILL signal by default, or the signal specified with the --signal option. You can reference a container by its ID, ID-prefix, or name.

The --signal (or -s shorthand) flag sets the system call signal that is sent to the container. This signal can be a signal name in the format SIG<NAME>, for instance SIGINT, or an unsigned number that matches a position in the kernel’s syscall table, for instance 2.

While the default (SIGKILL) signal will terminate the container, the signal set through --signal may be non-terminal, depending on the container’s main process. For example, the SIGHUP signal in most cases will be non-terminal, and the container will continue running after receiving the signal.

Note

ENTRYPOINT and CMD in the shell form run as a child process of /bin/sh -c, which does not pass signals. This means that the executable is not the container’s PID 1 and does not receive Unix signals.
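
As an illustration of the difference (a minimal Dockerfile sketch, not taken from this page), the exec form keeps the executable as PID 1 so it receives signals from docker kill, while the shell form wraps it in /bin/sh -c:

# shell form: nginx runs as a child of /bin/sh -c and does not receive the signal
CMD nginx -g "daemon off;"

# exec form: nginx is the container's PID 1 and receives the signal
CMD ["nginx", "-g", "daemon off;"]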

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--signal , -s KILL Signal to send to the container

Examples

Send a KILL signal to a container

The following example sends the default SIGKILL signal to the container named my_container:

$ docker kill my_container
+

Send a custom signal to a container

The following example sends a SIGHUP signal to the container named my_container:

$ docker kill --signal=SIGHUP  my_container
+

You can specify a custom signal either by name, or number. The SIG prefix is optional, so the following examples are equivalent:

$ docker kill --signal=SIGHUP my_container
+$ docker kill --signal=HUP my_container
+$ docker kill --signal=1 my_container
+

Refer to the signal(7) man-page for a list of standard Linux signals.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/kill/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fload%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fload%2Findex.html new file mode 100644 index 00000000..ab5d832b --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fload%2Findex.html @@ -0,0 +1,36 @@ +

docker load


Load an image from a tar archive or STDIN

Usage

$ docker load [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Load an image or repository from a tar archive (even if compressed with gzip, bzip2, or xz) from a file or STDIN. It restores both images and tags.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--input , -i Read from tar archive file, instead of STDIN
--quiet , -q Suppress the load output

Examples

$ docker image ls
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+
+$ docker load < busybox.tar.gz
+
+Loaded image: busybox:latest
+$ docker images
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+
+$ docker load --input fedora.tar
+
+Loaded image: fedora:rawhide
+
+Loaded image: fedora:20
+
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
+fedora              20                  58394af37342        7 weeks ago         385.5 MB
+fedora              heisenbug           58394af37342        7 weeks ago         385.5 MB
+fedora              latest              58394af37342        7 weeks ago         385.5 MB
+
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Flogin%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Flogin%2Findex.html new file mode 100644 index 00000000..047c0030 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Flogin%2Findex.html @@ -0,0 +1,33 @@ +

docker login


Log in to a Docker registry

Usage

$ docker login [OPTIONS] [SERVER]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Log in to a registry.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--password , -p Password
--password-stdin Take the password from stdin
--username , -u Username

Examples

Login to a self-hosted registry

If you want to log in to a self-hosted registry, you can specify this by adding the server name.

$ docker login localhost:8080
+

Provide a password using STDIN

To run the docker login command non-interactively, you can set the --password-stdin flag to provide a password through STDIN. Using STDIN prevents the password from ending up in the shell’s history, or log-files.

The following example reads a password from a file, and passes it to the docker login command using STDIN:

$ cat ~/my_password.txt | docker login --username foo --password-stdin
+

Privileged user requirement

docker login requires the user to use sudo or to be root, except when:

  1. Connecting to a remote daemon, such as a docker-machine provisioned docker engine.
  2. The user is added to the docker group. This will impact the security of your system; the docker group is root equivalent. See Docker Daemon Attack Surface for details.

You can log into any public or private repository for which you have credentials. When you log in, the command stores credentials in $HOME/.docker/config.json on Linux or %USERPROFILE%/.docker/config.json on Windows, via the procedure described below.

Credentials store

The Docker Engine can keep user credentials in an external credentials store, such as the native keychain of the operating system. Using an external store is more secure than storing credentials in the Docker configuration file.

To use a credentials store, you need an external helper program to interact with a specific keychain or external store. Docker requires the helper program to be in the client’s host $PATH.

This is the list of currently available credentials helpers and where you can download them from:

Configure the credentials store

You need to specify the credentials store in $HOME/.docker/config.json to tell the docker engine to use it. The value of the config property should be the suffix of the program to use (i.e. everything after docker-credential-). For example, to use docker-credential-osxkeychain:

{
+  "credsStore": "osxkeychain"
+}
+

If you are currently logged in, run docker logout to remove the credentials from the file and run docker login again.

Default behavior

By default, Docker looks for the native binary on each of the platforms, i.e. “osxkeychain” on macOS, “wincred” on Windows, and “pass” on Linux. A special case is that on Linux, Docker will fall back to the “secretservice” binary if it cannot find the “pass” binary. If none of these binaries are present, it stores the credentials (i.e. the password) base64-encoded in the config files described above.

Credential helper protocol

Credential helpers can be any program or script that follows a very simple protocol. This protocol is heavily inspired by Git, but it differs in the information shared.

The helpers always use the first argument in the command to identify the action. There are only three possible values for that argument: store, get, and erase.

The store command takes a JSON payload from the standard input. That payload carries the server address, to identify the credential, the user name, and either a password or an identity token.

{
+  "ServerURL": "https://index.docker.io/v1",
+  "Username": "david",
+  "Secret": "passw0rd1"
+}
+

If the secret being stored is an identity token, the Username should be set to <token>.

The store command can write error messages to STDOUT that the docker engine will show if there was an issue.

The get command takes a string payload from the standard input. That payload carries the server address that the docker engine needs credentials for. This is an example of that payload: https://index.docker.io/v1.

The get command writes a JSON payload to STDOUT. Docker reads the user name and password from this payload:

{
+  "Username": "david",
+  "Secret": "passw0rd1"
+}
+

The erase command takes a string payload from STDIN. That payload carries the server address that the docker engine wants to remove credentials for. This is an example of that payload: https://index.docker.io/v1.

The erase command can write error messages to STDOUT that the docker engine will show if there was an issue.
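
To see the protocol in action, you can invoke a helper directly from a shell. This is only a sketch: it assumes a helper such as docker-credential-pass is installed and already holds a credential for the given server, and the exact fields in the output may vary by helper:

$ echo "https://index.docker.io/v1" | docker-credential-pass get
{"Username":"david","Secret":"passw0rd1"}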

Credential helpers

Credential helpers are similar to the credential store above, but act as the designated programs to handle credentials for specific registries. The default credential store (credsStore or the config file itself) will not be used for operations concerning credentials of the specified registries.

Configure credential helpers

If you are currently logged in, run docker logout to remove the credentials from the default store.

Credential helpers are specified in a similar way to credsStore, but allow for multiple helpers to be configured at a time. Keys specify the registry domain, and values specify the suffix of the program to use (i.e. everything after docker-credential-). For example:

{
+  "credHelpers": {
+    "registry.example.com": "registryhelper",
+    "awesomereg.example.org": "hip-star",
+    "unicorn.example.io": "vcbait"
+  }
+}
+
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Flogout%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Flogout%2Findex.html new file mode 100644 index 00000000..225c2784 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Flogout%2Findex.html @@ -0,0 +1,8 @@ +

docker logout


Log out from a Docker registry

Usage

$ docker logout [SERVER]
+

For example uses of this command, refer to the examples section below.

Examples

$ docker logout localhost:8080
+
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Flogs%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Flogs%2Findex.html new file mode 100644 index 00000000..0600b331 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Flogs%2Findex.html @@ -0,0 +1,20 @@ +

docker logs


Fetch the logs of a container

Usage

$ docker logs [OPTIONS] CONTAINER
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker logs command batch-retrieves logs present at the time of execution.

Note

This command is only functional for containers that are started with the json-file or journald logging driver.

For more information about selecting and configuring logging drivers, refer to Configure logging drivers.
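
As a quick check (my_container is a placeholder name), you can inspect which logging driver a container uses:

$ docker inspect --format '{{.HostConfig.LogConfig.Type}}' my_container
json-file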

The docker logs --follow command will continue streaming the new output from the container’s STDOUT and STDERR.

Passing a negative number or a non-integer to --tail is invalid and the value is set to all in that case.

The docker logs --timestamps command will add an RFC3339Nano timestamp, for example 2014-09-16T06:17:46.000000000Z, to each log entry. To ensure that the timestamps are aligned, the nanosecond part of the timestamp will be padded with zeros when necessary.

The docker logs --details command will add extra attributes, such as environment variables and labels, provided to --log-opt when creating the container.

The --since option shows only the container logs generated after a given date. You can specify the date as an RFC 3339 date, a UNIX timestamp, or a Go duration string (e.g. 1m30s, 3h). Besides RFC3339 date format you may also use RFC3339Nano, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the client will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. You can combine the --since option with either or both of the --follow or --tail options.
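
For example, assuming a container named my_container, these flags can be combined to follow only recent output:

$ docker logs --since 30m --tail 50 --follow my_container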

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--details Show extra details provided to logs
--follow , -f Follow log output
--since Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
--tail , -n all Number of lines to show from the end of the logs
--timestamps , -t Show timestamps
--until Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)

Examples

Retrieve logs until a specific point in time

In order to retrieve logs before a specific point in time, run:

$ docker run --name test -d busybox sh -c "while true; do $(echo date); sleep 1; done"
+$ date
+Tue 14 Nov 2017 16:40:00 CET
+$ docker logs -f --until=2s test
+Tue 14 Nov 2017 16:40:00 CET
+Tue 14 Nov 2017 16:40:01 CET
+Tue 14 Nov 2017 16:40:02 CET
+
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest%2Findex.html new file mode 100644 index 00000000..5ba7db4a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest%2Findex.html @@ -0,0 +1,154 @@ +

docker manifest


Manage Docker image manifests and manifest lists

This command is experimental.

Experimental features are intended for testing and feedback as their functionality or UX may change between releases without warning or can be removed entirely in a future release.

Usage

$ docker manifest COMMAND
+

Description

The docker manifest command by itself performs no action. In order to operate on a manifest or manifest list, one of the subcommands must be used.

A single manifest is information about an image, such as layers, size, and digest. The docker manifest command also gives users additional information such as the os and architecture an image was built for.

A manifest list is a list of image manifests that is created by specifying one or more (ideally more than one) image names. It can then be used in the same way as an image name in docker pull and docker run commands, for example.

Ideally a manifest list is created from images that are identical in function for different os/arch combinations. For this reason, manifest lists are often referred to as “multi-arch images”. However, a user could create a manifest list that points to two images -- one for windows on amd64, and one for darwin on amd64.

manifest inspect

$ docker manifest inspect --help
+
+Usage:  docker manifest inspect [OPTIONS] [MANIFEST_LIST] MANIFEST
+
+Display an image manifest, or manifest list
+
+Options:
+      --help       Print usage
+      --insecure   Allow communication with an insecure registry
+  -v, --verbose    Output additional info including layers and platform
+

manifest create

Usage:  docker manifest create MANIFEST_LIST MANIFEST [MANIFEST...]
+
+Create a local manifest list for annotating and pushing to a registry
+
+Options:
+  -a, --amend      Amend an existing manifest list
+      --insecure   Allow communication with an insecure registry
+      --help       Print usage
+

manifest annotate

Usage:  docker manifest annotate [OPTIONS] MANIFEST_LIST MANIFEST
+
+Add additional information to a local image manifest
+
+Options:
+      --arch string               Set architecture
+      --help                      Print usage
+      --os string                 Set operating system
+      --os-version string         Set operating system version
+      --os-features stringSlice   Set operating system feature
+      --variant string            Set architecture variant
+
+

manifest push

Usage:  docker manifest push [OPTIONS] MANIFEST_LIST
+
+Push a manifest list to a repository
+
+Options:
+      --help       Print usage
+      --insecure   Allow push to an insecure registry
+  -p, --purge      Remove the local manifest list after push
+

Working with insecure registries

The manifest command interacts solely with a Docker registry. Because of this, it has no way to query the engine for the list of allowed insecure registries. To allow the CLI to interact with an insecure registry, some docker manifest commands have an --insecure flag. For each transaction, such as a create, which queries a registry, the --insecure flag must be specified. This flag tells the CLI that this registry call may ignore security concerns like missing or self-signed certificates. Likewise, on a manifest push to an insecure registry, the --insecure flag must be specified. If this is not used with an insecure registry, the manifest command fails to find a registry that meets the default requirements.

For example uses of this command, refer to the examples section below.

Examples

Inspect an image’s manifest object

$ docker manifest inspect hello-world
+{
+        "schemaVersion": 2,
+        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+        "config": {
+                "mediaType": "application/vnd.docker.container.image.v1+json",
+                "size": 1520,
+                "digest": "sha256:1815c82652c03bfd8644afda26fb184f2ed891d921b20a0703b46768f9755c57"
+        },
+        "layers": [
+                {
+                        "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+                        "size": 972,
+                        "digest": "sha256:b04784fba78d739b526e27edc02a5a8cd07b1052e9283f5fc155828f4b614c28"
+                }
+        ]
+}
+

Inspect an image’s manifest and get the os/arch info

The docker manifest inspect command takes an optional --verbose flag that gives you the image’s name (Ref), and architecture and os (Platform).

Just as with other docker commands that take image names, you can refer to an image with or without a tag, or by digest (e.g. hello-world@sha256:f3b3b28a45160805bb16542c9531888519430e9e6d6ffc09d72261b0d26ff74f).

Here is an example of inspecting an image’s manifest with the --verbose flag:

$ docker manifest inspect --verbose hello-world
+{
+        "Ref": "docker.io/library/hello-world:latest",
+        "Digest": "sha256:f3b3b28a45160805bb16542c9531888519430e9e6d6ffc09d72261b0d26ff74f",
+        "SchemaV2Manifest": {
+                "schemaVersion": 2,
+                "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+                "config": {
+                        "mediaType": "application/vnd.docker.container.image.v1+json",
+                        "size": 1520,
+                        "digest": "sha256:1815c82652c03bfd8644afda26fb184f2ed891d921b20a0703b46768f9755c57"
+                },
+                "layers": [
+                        {
+                                "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
+                                "size": 972,
+                                "digest": "sha256:b04784fba78d739b526e27edc02a5a8cd07b1052e9283f5fc155828f4b614c28"
+                        }
+                ]
+        },
+        "Platform": {
+                "architecture": "amd64",
+                "os": "linux"
+        }
+}
+

Create and push a manifest list

To create a manifest list, you first create the manifest list locally by specifying the constituent images you would like to have included in your manifest list. Keep in mind that this is pushed to a registry, so if you want to push to a registry other than the docker registry, you need to create your manifest list with the registry name or IP and port. This is similar to tagging an image and pushing it to a foreign registry.

After you have created your local copy of the manifest list, you may optionally annotate it. Annotations allowed are the architecture and operating system (overriding the image’s current values), os features, and an architecture variant.

Finally, you need to push your manifest list to the desired registry. Below are descriptions of these three commands, and an example putting them all together.

$ docker manifest create 45.55.81.106:5000/coolapp:v1 \
+    45.55.81.106:5000/coolapp-ppc64le-linux:v1 \
+    45.55.81.106:5000/coolapp-arm-linux:v1 \
+    45.55.81.106:5000/coolapp-amd64-linux:v1 \
+    45.55.81.106:5000/coolapp-amd64-windows:v1
+
+Created manifest list 45.55.81.106:5000/coolapp:v1
+
$ docker manifest annotate 45.55.81.106:5000/coolapp:v1 45.55.81.106:5000/coolapp-arm-linux --arch arm
+
$ docker manifest push 45.55.81.106:5000/coolapp:v1
+Pushed manifest 45.55.81.106:5000/coolapp@sha256:9701edc932223a66e49dd6c894a11db8c2cf4eccd1414f1ec105a623bf16b426 with digest: sha256:f67dcc5fc786f04f0743abfe0ee5dae9bd8caf8efa6c8144f7f2a43889dc513b
+Pushed manifest 45.55.81.106:5000/coolapp@sha256:f3b3b28a45160805bb16542c9531888519430e9e6d6ffc09d72261b0d26ff74f with digest: sha256:b64ca0b60356a30971f098c92200b1271257f100a55b351e6bbe985638352f3a
+Pushed manifest 45.55.81.106:5000/coolapp@sha256:39dc41c658cf25f33681a41310372f02728925a54aac3598310bfb1770615fc9 with digest: sha256:df436846483aff62bad830b730a0d3b77731bcf98ba5e470a8bbb8e9e346e4e8
+Pushed manifest 45.55.81.106:5000/coolapp@sha256:f91b1145cd4ac800b28122313ae9e88ac340bb3f1e3a4cd3e59a3648650f3275 with digest: sha256:5bb8e50aa2edd408bdf3ddf61efb7338ff34a07b762992c9432f1c02fc0e5e62
+sha256:050b213d49d7673ba35014f21454c573dcbec75254a08f4a7c34f66a47c06aba
+
+

Inspect a manifest list

$ docker manifest inspect coolapp:v1
+{
+   "schemaVersion": 2,
+   "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+   "manifests": [
+      {
+         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+         "size": 424,
+         "digest": "sha256:f67dcc5fc786f04f0743abfe0ee5dae9bd8caf8efa6c8144f7f2a43889dc513b",
+         "platform": {
+            "architecture": "arm",
+            "os": "linux"
+         }
+      },
+      {
+         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+         "size": 424,
+         "digest": "sha256:b64ca0b60356a30971f098c92200b1271257f100a55b351e6bbe985638352f3a",
+         "platform": {
+            "architecture": "amd64",
+            "os": "linux"
+         }
+      },
+      {
+         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+         "size": 425,
+         "digest": "sha256:df436846483aff62bad830b730a0d3b77731bcf98ba5e470a8bbb8e9e346e4e8",
+         "platform": {
+            "architecture": "ppc64le",
+            "os": "linux"
+         }
+      },
+      {
+         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+         "size": 425,
+         "digest": "sha256:5bb8e50aa2edd408bdf3ddf61efb7338ff34a07b762992c9432f1c02fc0e5e62",
+         "platform": {
+            "architecture": "s390x",
+            "os": "linux"
+         }
+      }
+   ]
+}
+

Push to an insecure registry

Here is an example of creating and pushing a manifest list using a known insecure registry.

$ docker manifest create --insecure myprivateregistry.mycompany.com/repo/image:1.0 \
+    myprivateregistry.mycompany.com/repo/image-linux-ppc64le:1.0 \
+    myprivateregistry.mycompany.com/repo/image-linux-s390x:1.0 \
+    myprivateregistry.mycompany.com/repo/image-linux-arm:1.0 \
+    myprivateregistry.mycompany.com/repo/image-linux-armhf:1.0 \
+    myprivateregistry.mycompany.com/repo/image-windows-amd64:1.0 \
+    myprivateregistry.mycompany.com/repo/image-linux-amd64:1.0
+
+$ docker manifest push --insecure myprivateregistry.mycompany.com/repo/image:tag
+

Note

The --insecure flag is not required to annotate a manifest list, since annotations are to a locally-stored copy of a manifest list. You may also skip the --insecure flag if you are performing a docker manifest inspect on a locally-stored manifest list. Be sure to keep in mind that locally-stored manifest lists are never used by the engine on a docker pull.

Child commands

Command Description
docker manifest annotate Add additional information to a local image manifest
docker manifest create Create a local manifest list for annotating and pushing to a registry
docker manifest inspect Display an image manifest, or manifest list
docker manifest push Push a manifest list to a repository
docker manifest rm Delete one or more manifest lists from local storage
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_annotate%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_annotate%2Findex.html new file mode 100644 index 00000000..e4a1af93 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_annotate%2Findex.html @@ -0,0 +1,7 @@ +

docker manifest annotate


Add additional information to a local image manifest

This command is experimental.

Experimental features are intended for testing and feedback as their functionality or UX may change between releases without warning or can be removed entirely in a future release.

Usage

$ docker manifest annotate [OPTIONS] MANIFEST_LIST MANIFEST
+

Options

Name, shorthand Default Description
--arch Set architecture
--os Set operating system
--os-features Set operating system feature
--os-version Set operating system version
--variant Set architecture variant

Parent command

Command Description
docker manifest Manage Docker image manifests and manifest lists

Related commands

Command Description
docker manifest annotate Add additional information to a local image manifest
docker manifest create Create a local manifest list for annotating and pushing to a registry
docker manifest inspect Display an image manifest, or manifest list
docker manifest push Push a manifest list to a repository
docker manifest rm Delete one or more manifest lists from local storage
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_create%2Findex.html new file mode 100644 index 00000000..67e1cc7c --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_create%2Findex.html @@ -0,0 +1,9 @@ +

docker manifest create


Create a local manifest list for annotating and pushing to a registry

This command is experimental.

Experimental features are intended for testing and feedback as their functionality or UX may change between releases without warning or can be removed entirely in a future release.

Usage

$ docker manifest create MANIFEST_LIST MANIFEST [MANIFEST...]
+

Options

Name, shorthand Default Description
--amend , -a Amend an existing manifest list
--insecure Allow communication with an insecure registry

Parent command

Command Description
docker manifest Manage Docker image manifests and manifest lists

Related commands

Command Description
docker manifest annotate Add additional information to a local image manifest
docker manifest create Create a local manifest list for annotating and pushing to a registry
docker manifest inspect Display an image manifest, or manifest list
docker manifest push Push a manifest list to a repository
docker manifest rm Delete one or more manifest lists from local storage
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_inspect%2Findex.html new file mode 100644 index 00000000..5f2dbe16 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_inspect%2Findex.html @@ -0,0 +1,9 @@ +

docker manifest inspect


Display an image manifest, or manifest list

This command is experimental.

Experimental features are intended for testing and feedback as their functionality or UX may change between releases without warning or can be removed entirely in a future release.

Usage

$ docker manifest inspect [OPTIONS] [MANIFEST_LIST] MANIFEST
+

Options

Name, shorthand Default Description
--insecure Allow communication with an insecure registry
--verbose , -v Output additional info including layers and platform

Parent command

Command Description
docker manifest Manage Docker image manifests and manifest lists

Related commands

Command Description
docker manifest annotate Add additional information to a local image manifest
docker manifest create Create a local manifest list for annotating and pushing to a registry
docker manifest inspect Display an image manifest, or manifest list
docker manifest push Push a manifest list to a repository
docker manifest rm Delete one or more manifest lists from local storage
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_push%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_push%2Findex.html new file mode 100644 index 00000000..2c5e4949 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_push%2Findex.html @@ -0,0 +1,9 @@ +

docker manifest push


Push a manifest list to a repository

This command is experimental.

Experimental features are intended for testing and feedback as their functionality or UX may change between releases without warning or can be removed entirely in a future release.

Usage

$ docker manifest push [OPTIONS] MANIFEST_LIST
+

Options

Name, shorthand Default Description
--insecure Allow push to an insecure registry
--purge , -p Remove the local manifest list after push

Parent command

Command Description
docker manifest Manage Docker image manifests and manifest lists

Related commands

Command Description
docker manifest annotate Add additional information to a local image manifest
docker manifest create Create a local manifest list for annotating and pushing to a registry
docker manifest inspect Display an image manifest, or manifest list
docker manifest push Push a manifest list to a repository
docker manifest rm Delete one or more manifest lists from local storage
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_rm%2Findex.html new file mode 100644 index 00000000..b7d254b9 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fmanifest_rm%2Findex.html @@ -0,0 +1,7 @@ +

docker manifest rm


Delete one or more manifest lists from local storage

This command is experimental.

Experimental features are intended for testing and feedback as their functionality or UX may change between releases without warning or can be removed entirely in a future release.

Usage

$ docker manifest rm MANIFEST_LIST [MANIFEST_LIST...]
+

Parent command

Command Description
docker manifest Manage Docker image manifests and manifest lists

Related commands

Command Description
docker manifest annotate Add additional information to a local image manifest
docker manifest create Create a local manifest list for annotating and pushing to a registry
docker manifest inspect Display an image manifest, or manifest list
docker manifest push Push a manifest list to a repository
docker manifest rm Delete one or more manifest lists from local storage
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork%2Findex.html new file mode 100644 index 00000000..7c8b391d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork%2Findex.html @@ -0,0 +1,7 @@ +

docker network


Manage networks

Usage

$ docker network COMMAND
+

Description

Manage networks. You can use subcommands to create, inspect, list, remove, prune, connect, and disconnect networks.

Child commands

Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_connect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_connect%2Findex.html new file mode 100644 index 00000000..9ceb8149 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_connect%2Findex.html @@ -0,0 +1,14 @@ +

docker network connect


Connect a container to a network

Usage

$ docker network connect [OPTIONS] NETWORK CONTAINER
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Connects a container to a network. You can connect a container by name or by ID. Once connected, the container can communicate with other containers in the same network.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--alias Add network-scoped alias for the container
--driver-opt driver options for the network
--ip IPv4 address (e.g., 172.30.100.104)
--ip6 IPv6 address (e.g., 2001:db8::33)
--link Add link to another container
--link-local-ip Add a link-local address for the container

Examples

Connect a running container to a network

$ docker network connect multi-host-network container1
+

Connect a container to a network when it starts

You can also use the docker run --network=<network-name> option to start a container and immediately connect it to a network.

$ docker run -itd --network=multi-host-network busybox
+

Specify the IP address a container will use on a given network

You can specify the IP address you want to be assigned to the container’s interface.

$ docker network connect --ip 10.10.36.122 multi-host-network container2
+

You can use the --link option to link another container with a preferred alias

$ docker network connect --link container1:c1 multi-host-network container2
+

Create a network alias for a container

The --alias option can be used to resolve the container by another name on the network being connected to.

$ docker network connect --alias db --alias mysql multi-host-network container2
+

Network implications of stopping, pausing, or restarting containers

You can pause, restart, and stop containers that are connected to a network. A container connects to its configured networks when it runs.

If specified, the container’s IP address(es) is reapplied when a stopped container is restarted. If the IP address is no longer available, the container fails to start. One way to guarantee that the IP address is available is to specify an --ip-range when creating the network, and choose the static IP address(es) from outside that range. This ensures that the IP address is not given to another container while this container is not on the network.

$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network
+
$ docker network connect --ip 172.20.128.2 multi-host-network container2
+

To verify the container is connected, use the docker network inspect command. Use docker network disconnect to remove a container from the network.

Once connected to a network, containers can communicate using only another container’s IP address or name. For overlay networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way.

You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container to both bridge and overlay networks.
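
As a quick sketch (the names and images are arbitrary), two containers on the same user-defined network can reach each other by name:

$ docker network create my-net
$ docker run -d --name web --network my-net nginx
$ docker run --rm --network my-net busybox ping -c 1 web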

Parent command

Command Description
docker network Manage networks

Related commands

Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_create%2Findex.html new file mode 100644 index 00000000..f9a8a723 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_create%2Findex.html @@ -0,0 +1,38 @@ +

docker network create


Create a network

Usage

$ docker network create [OPTIONS] NETWORK
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a new network. The DRIVER accepts bridge or overlay which are the built-in network drivers. If you have installed a third party or your own custom network driver you can specify that DRIVER here also. If you don’t specify the --driver option, the command automatically creates a bridge network for you. When you install Docker Engine it creates a bridge network automatically. This network corresponds to the docker0 bridge that Engine has traditionally relied on. When you launch a new container with docker run it automatically connects to this bridge network. You cannot remove this default bridge network, but you can create new ones using the network create command.

$ docker network create -d bridge my-bridge-network
+

Bridge networks are isolated networks on a single Engine installation. If you want to create a network that spans multiple Docker hosts each running an Engine, you must create an overlay network. Unlike bridge networks, overlay networks require some pre-existing conditions before you can create one. These conditions are:

The dockerd options that support the overlay network are:

To read more about these options and how to configure them, see Get started with multi-host network.

While not required, it is a good idea to install Docker Swarm to manage the cluster that makes up your network. Swarm provides sophisticated discovery and server management tools that can assist your implementation.

Once you have prepared the overlay network prerequisites you simply choose a Docker host in the cluster and issue the following to create the network:

$ docker network create -d overlay my-multihost-network
+

Network names must be unique. The Docker daemon attempts to identify naming conflicts but this is not guaranteed. It is the user’s responsibility to avoid name conflicts.

Overlay network limitations

When you create networks using the default VIP-based endpoint mode, you should create overlay networks with /24 blocks (the default), which limits you to 256 IP addresses. This recommendation addresses limitations with swarm mode. If you need more than 256 IP addresses, do not increase the IP block size. You can either use dnsrr endpoint mode with an external load balancer, or use multiple smaller overlay networks. See Configure service discovery for more information about different endpoint modes.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--attachable Enable manual container attachment
--aux-address Auxiliary IPv4 or IPv6 addresses used by Network driver
--config-from The network from which to copy the configuration
--config-only Create a configuration only network
--driver , -d bridge Driver to manage the Network
--gateway IPv4 or IPv6 Gateway for the master subnet
--ingress Create swarm routing-mesh network
--internal Restrict external access to the network
--ip-range Allocate container ip from a sub-range
--ipam-driver IP Address Management Driver
--ipam-opt Set IPAM driver specific options
--ipv6 Enable IPv6 networking
--label Set metadata on a network
--opt , -o Set driver specific options
--scope Control the network's scope
--subnet Subnet in CIDR format that represents a network segment

Examples

Connect containers

When you start a container, use the --network flag to connect it to a network. This example adds the busybox container to the mynet network:

$ docker run -itd --network=mynet busybox
+

If you want to add a container to a network after the container is already running, use the docker network connect subcommand.

You can connect multiple containers to the same network. Once connected, the containers can communicate using only another container’s IP address or name. For overlay networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way.

You can disconnect a container from a network using the docker network disconnect command.

Specify advanced options

When you create a network, Engine creates a non-overlapping subnetwork for the network by default. This subnetwork is not a subdivision of an existing network. It is purely for ip-addressing purposes. You can override this default and specify subnetwork values directly using the --subnet option. On a bridge network you can only create a single subnet:

$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0
+

Additionally, you can also specify the --gateway, --ip-range, and --aux-address options.

$ docker network create \
+  --driver=bridge \
+  --subnet=172.28.0.0/16 \
+  --ip-range=172.28.5.0/24 \
+  --gateway=172.28.5.254 \
+  br0
+

If you omit the --gateway flag, the Engine selects one for you from inside a preferred pool. For overlay networks, and for network driver plugins that support it, you can create multiple subnetworks. This example uses two /25 subnets to adhere to the current guidance of not having more than 256 IPs in a single overlay network. Each of the subnetworks has 126 usable addresses.

$ docker network create -d overlay \
+  --subnet=192.168.10.0/25 \
+  --subnet=192.168.20.0/25 \
+  --gateway=192.168.10.100 \
+  --gateway=192.168.20.100 \
+  --aux-address="my-router=192.168.10.5" --aux-address="my-switch=192.168.10.6" \
+  --aux-address="my-printer=192.168.20.5" --aux-address="my-nas=192.168.20.6" \
+  my-multihost-network
+

Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error.

Bridge driver options

When creating a custom network, the default network driver (i.e. bridge) has additional options that can be passed. The following are those options and the equivalent docker daemon flags used for docker0 bridge:

Option Equivalent Description
com.docker.network.bridge.name - Bridge name to be used when creating the Linux bridge
com.docker.network.bridge.enable_ip_masquerade --ip-masq Enable IP masquerading
com.docker.network.bridge.enable_icc --icc Enable or Disable Inter Container Connectivity
com.docker.network.bridge.host_binding_ipv4 --ip Default IP when binding container ports
com.docker.network.driver.mtu --mtu Set the containers network MTU
com.docker.network.container_iface_prefix - Set a custom prefix for container interfaces

The following arguments can be passed to docker network create for any network driver, again with their approximate equivalents to docker daemon.

Argument Equivalent Description
--gateway - IPv4 or IPv6 Gateway for the master subnet
--ip-range --fixed-cidr Allocate IPs from a range
--internal - Restrict external access to the network
--ipv6 --ipv6 Enable IPv6 networking
--subnet --bip Subnet for network

For example, let’s use the -o or --opt option to specify an IP address binding when publishing ports:

$ docker network create \
+    -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \
+    simple-network
+

Network internal mode

By default, when you connect a container to an overlay network, Docker also connects a bridge network to it to provide external connectivity. If you want to create an externally isolated overlay network, you can specify the --internal option.
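
For instance (the network name is arbitrary, and overlay networks must be created on a swarm manager node), an externally isolated overlay network could be created like this:

$ docker network create --driver overlay --internal my-internal-net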

Network ingress mode

You can create the network that will be used to provide the routing mesh in the swarm cluster by specifying --ingress when creating the network. Only one ingress network can be created at a time. The network can be removed only if no services depend on it. Any option available when creating an overlay network is also available when creating the ingress network, apart from the --attachable option.

$ docker network create -d overlay \
+  --subnet=10.11.0.0/16 \
+  --ingress \
+  --opt com.docker.network.driver.mtu=9216 \
+  --opt encrypted=true \
+  my-ingress-network
+

Parent command

Command Description
docker network Manage networks

Related commands

Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_disconnect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_disconnect%2Findex.html new file mode 100644 index 00000000..ec024942 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_disconnect%2Findex.html @@ -0,0 +1,10 @@ +

docker network disconnect


Disconnect a container from a network

Usage

$ docker network disconnect [OPTIONS] NETWORK CONTAINER
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Disconnects a container from a network. The container must be running to disconnect it from the network.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--force , -f Force the container to disconnect from a network

Examples

$ docker network disconnect multi-host-network container1
+

Parent command

Command Description
docker network Manage networks

Related commands

Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_inspect%2Findex.html new file mode 100644 index 00000000..e7946091 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_inspect%2Findex.html @@ -0,0 +1,11 @@ +

docker network inspect


Display detailed information on one or more networks

Usage

$ docker network inspect [OPTIONS] NETWORK [NETWORK...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Returns information about one or more networks. By default, this command renders all results in a JSON object.
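
For example (a sketch using the default bridge network), the --format flag can be used to extract a single field:

$ docker network inspect --format '{{.Driver}}' bridge
bridge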

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template
--verbose , -v Verbose output for diagnostics

Parent command

Command Description
docker network Manage networks

Related commands

Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+

diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_ls%2Findex.html new file mode 100644 index 00000000..44e6cc6b --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_ls%2Findex.html @@ -0,0 +1,72 @@ +

docker network ls


List networks

Usage

$ docker network ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists all the networks the Engine daemon knows about. This includes the networks that span across multiple hosts in a cluster.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter , -f Provide filter values (e.g. 'driver=bridge')
--format Pretty-print networks using a Go template
--no-trunc Do not truncate the output
--quiet , -q Only display network IDs

Examples

List all networks

$ docker network ls
+NETWORK ID          NAME                DRIVER          SCOPE
+7fca4eb8c647        bridge              bridge          local
+9f904ee27bf5        none                null            local
+cf03ee007fb4        host                host            local
+78b03ee04fc4        multi-host          overlay         swarm
+

Use the --no-trunc option to display the full network id:

$ docker network ls --no-trunc
+NETWORK ID                                                         NAME                DRIVER           SCOPE
+18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3   none                null             local
+c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47   host                host             local
+7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185   bridge              bridge           local
+95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd   foo                 bridge           local
+63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161   dev                 bridge           local
+

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g. --filter "foo=bar" --filter "bif=baz"). Multiple filter flags are combined as an OR filter. For example, -f type=custom -f type=builtin returns both custom and builtin networks.

The currently supported filters are:

Driver

The driver filter matches networks based on their driver.

The following example matches networks with the bridge driver:

$ docker network ls --filter driver=bridge
+NETWORK ID          NAME                DRIVER            SCOPE
+db9db329f835        test1               bridge            local
+f6e212da9dfd        test2               bridge            local
+

ID

The id filter matches on all or part of a network’s ID.

The following filter matches all networks with an ID containing the 63d1ff1f77b0... string.

$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161
+NETWORK ID          NAME                DRIVER           SCOPE
+63d1ff1f77b0        dev                 bridge           local
+

You can also filter for a substring in an ID as this shows:

$ docker network ls --filter id=95e74588f40d
+NETWORK ID          NAME                DRIVER          SCOPE
+95e74588f40d        foo                 bridge          local
+
+$ docker network ls --filter id=95e
+NETWORK ID          NAME                DRIVER          SCOPE
+95e74588f40d        foo                 bridge          local
+

Label

The label filter matches networks based on the presence of a label alone or a label and a value.

The following filter matches networks with the usage label regardless of its value.

$ docker network ls -f "label=usage"
+NETWORK ID          NAME                DRIVER         SCOPE
+db9db329f835        test1               bridge         local
+f6e212da9dfd        test2               bridge         local
+

The following filter matches networks with the usage label with the prod value.

$ docker network ls -f "label=usage=prod"
+NETWORK ID          NAME                DRIVER        SCOPE
+f6e212da9dfd        test2               bridge        local
+

Name

The name filter matches on all or part of a network’s name.

The following filter matches all networks with a name containing the foobar string.

$ docker network ls --filter name=foobar
+NETWORK ID          NAME                DRIVER       SCOPE
+06e7eef0a170        foobar              bridge       local
+

You can also filter for a substring in a name as this shows:

$ docker network ls --filter name=foo
+NETWORK ID          NAME                DRIVER       SCOPE
+95e74588f40d        foo                 bridge       local
+06e7eef0a170        foobar              bridge       local
+

Scope

The scope filter matches networks based on their scope.

The following example matches networks with the swarm scope:

$ docker network ls --filter scope=swarm
+NETWORK ID          NAME                DRIVER              SCOPE
+xbtm0v4f1lfh        ingress             overlay             swarm
+ic6r88twuu92        swarmnet            overlay             swarm
+

The following example matches networks with the local scope:

$ docker network ls --filter scope=local
+NETWORK ID          NAME                DRIVER              SCOPE
+e85227439ac7        bridge              bridge              local
+0ca0e19443ed        host                host                local
+ca13cc149a36        localnet            bridge              local
+f9e115d2de35        none                null                local
+

Type

The type filter supports two values; builtin displays predefined networks (bridge, none, host), whereas custom displays user defined networks.

The following filter matches all user defined networks:

$ docker network ls --filter type=custom
+NETWORK ID          NAME                DRIVER       SCOPE
+95e74588f40d        foo                 bridge       local
+63d1ff1f77b0        dev                 bridge       local
+

This flag allows for batch cleanup. For example, use this filter to delete all user-defined networks:

$ docker network rm `docker network ls --filter type=custom -q`
+

A warning will be issued when trying to remove a network that has containers attached.

Formatting

The formatting options (--format) pretty-prints networks output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Network ID
.Name Network name
.Driver Network driver
.Scope Network scope (local, global)
.IPv6 Whether IPv6 is enabled on the network or not.
.Internal Whether the network is internal or not.
.Labels All labels assigned to the network.
.Label Value of a specific label for this network. For example {{.Label "project.version"}}
.CreatedAt Time when the network was created

When using the --format option, the network ls command will either output the data exactly as the template declares or, when using the table directive, include column headers as well.

The following example uses a template without headers and outputs the ID and Driver entries separated by a colon (:) for all networks:

$ docker network ls --format "{{.ID}}: {{.Driver}}"
+afaaab448eb2: bridge
+d1584f8dc718: host
+391df270dc66: null
+
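To include column headers, the same placeholders can be combined with the table directive; the column selection below is only an illustration:

$ docker network ls --format "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}"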

Parent command

Command Description
docker network Manage networks
Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_prune%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_prune%2Findex.html new file mode 100644 index 00000000..cd81b45a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_prune%2Findex.html @@ -0,0 +1,37 @@ +

docker network prune


Remove all unused networks

Usage

$ docker network prune [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Remove all unused networks. Unused networks are those which are not referenced by any containers.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter Provide filter values (e.g. 'until=<timestamp>')
--force, -f Do not prompt for confirmation

Examples

$ docker network prune
+
+WARNING! This will remove all custom networks not used by at least one container.
+Are you sure you want to continue? [y/N] y
+Deleted Networks:
+n1
+n2
+

Filtering

The filtering flag (--filter) format is “key=value”. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

The until filter can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative to the daemon machine’s time. Supported formats for date formatted time stamps include RFC3339Nano, RFC3339, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the daemon will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long.

The label filter accepts two formats. One is the label=... (label=<key> or label=<key>=<value>), which removes networks with the specified labels. The other format is the label!=... (label!=<key> or label!=<key>=<value>), which removes networks without the specified labels.
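As a sketch, assuming some networks carry an env=test label, the label filter can be combined with --force for unattended cleanup:

$ docker network prune --force --filter label=env=test   # env=test is a hypothetical label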

The following removes networks created more than 5 minutes ago. Note that system networks such as bridge, host, and none will never be pruned:

$ docker network ls
+
+NETWORK ID          NAME                DRIVER              SCOPE
+7430df902d7a        bridge              bridge              local
+ea92373fd499        foo-1-day-ago       bridge              local
+ab53663ed3c7        foo-1-min-ago       bridge              local
+97b91972bc3b        host                host                local
+f949d337b1f5        none                null                local
+
+$ docker network prune --force --filter until=5m
+
+Deleted Networks:
+foo-1-day-ago
+
+$ docker network ls
+
+NETWORK ID          NAME                DRIVER              SCOPE
+7430df902d7a        bridge              bridge              local
+ab53663ed3c7        foo-1-min-ago       bridge              local
+97b91972bc3b        host                host                local
+f949d337b1f5        none                null                local
+

Parent command

Command Description
docker network Manage networks
Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_rm%2Findex.html new file mode 100644 index 00000000..d492fc53 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnetwork_rm%2Findex.html @@ -0,0 +1,9 @@ +

docker network rm


Remove one or more networks

Usage

$ docker network rm NETWORK [NETWORK...]
+

Description

Removes one or more networks by name or identifier. To remove a network, you must first disconnect any containers connected to it.

For example uses of this command, refer to the examples section below.

Examples

Remove a network

To remove the network named ‘my-network’:

$ docker network rm my-network
+

Remove multiple networks

To delete multiple networks in a single docker network rm command, provide multiple network names or ids. The following example deletes a network with id 3695c422697f and a network named my-network:

$ docker network rm 3695c422697f my-network
+

When you specify multiple networks, the command attempts to delete each in turn. If the deletion of one network fails, the command continues to the next on the list and tries to delete that. The command reports success or failure for each deletion.

Parent command

Command Description
docker network Manage networks
Command Description
docker network connect Connect a container to a network
docker network create Create a network
docker network disconnect Disconnect a container from a network
docker network inspect Display detailed information on one or more networks
docker network ls List networks
docker network prune Remove all unused networks
docker network rm Remove one or more networks
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode%2Findex.html new file mode 100644 index 00000000..d182dc14 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode%2Findex.html @@ -0,0 +1,7 @@ +

docker node


Manage Swarm nodes

This command works with the Swarm orchestrator.

Usage

$ docker node COMMAND
+

Description

Manage nodes.

Child commands

Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_demote%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_demote%2Findex.html new file mode 100644 index 00000000..659917c1 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_demote%2Findex.html @@ -0,0 +1,8 @@ +

docker node demote


Demote one or more nodes from manager in the swarm

This command works with the Swarm orchestrator.

Usage

$ docker node demote NODE [NODE...]
+

Description

Demotes an existing manager so that it is no longer a manager.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Examples

$ docker node demote <node name>
+

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_inspect%2Findex.html new file mode 100644 index 00000000..1925d228 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_inspect%2Findex.html @@ -0,0 +1,119 @@ +

docker node inspect


Display detailed information on one or more nodes

This command works with the Swarm orchestrator.

Usage

$ docker node inspect [OPTIONS] self|NODE [NODE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Returns information about a node. By default, this command renders all results in a JSON array. You can specify an alternate format to execute a given template for each result. Go’s text/template package describes all the details of the format.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format, -f Format the output using the given Go template
--pretty Print the information in a human friendly format

Examples

Inspect a node

$ docker node inspect swarm-manager
+
[
+  {
+    "ID": "e216jshn25ckzbvmwlnh5jr3g",
+    "Version": {
+      "Index": 10
+    },
+    "CreatedAt": "2017-05-16T22:52:44.9910662Z",
+    "UpdatedAt": "2017-05-16T22:52:45.230878043Z",
+    "Spec": {
+      "Role": "manager",
+      "Availability": "active"
+    },
+    "Description": {
+      "Hostname": "swarm-manager",
+      "Platform": {
+        "Architecture": "x86_64",
+        "OS": "linux"
+      },
+      "Resources": {
+        "NanoCPUs": 1000000000,
+        "MemoryBytes": 1039843328
+      },
+      "Engine": {
+        "EngineVersion": "17.06.0-ce",
+        "Plugins": [
+          {
+            "Type": "Volume",
+            "Name": "local"
+          },
+          {
+            "Type": "Network",
+            "Name": "overlay"
+          },
+          {
+            "Type": "Network",
+            "Name": "null"
+          },
+          {
+            "Type": "Network",
+            "Name": "host"
+          },
+          {
+            "Type": "Network",
+            "Name": "bridge"
+          },
+          {
+            "Type": "Network",
+            "Name": "overlay"
+          }
+        ]
+      },
+      "TLSInfo": {
+        "TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy\nNDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g\nAh8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO\nPQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3\nzONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==\n-----END CERTIFICATE-----\n",
+        "CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh",
+        "CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ=="
+      }
+    },
+    "Status": {
+      "State": "ready",
+      "Addr": "168.0.32.137"
+    },
+    "ManagerStatus": {
+      "Leader": true,
+      "Reachability": "reachable",
+      "Addr": "168.0.32.137:2377"
+    }
+  }
+]
+

Specify an output format

$ docker node inspect --format '{{ .ManagerStatus.Leader }}' self
+
+false
+

Use --format=pretty or the --pretty option to pretty-print the output:

$ docker node inspect --format=pretty self
+
+ID:                     e216jshn25ckzbvmwlnh5jr3g
+Hostname:               swarm-manager
+Joined at:              2017-05-16 22:52:44.9910662 +0000 utc
+Status:
+ State:                 Ready
+ Availability:          Active
+ Address:               172.17.0.2
+Manager Status:
+ Address:               172.17.0.2:2377
+ Raft Status:           Reachable
+ Leader:                Yes
+Platform:
+ Operating System:      linux
+ Architecture:          x86_64
+Resources:
+ CPUs:                  4
+ Memory:                7.704 GiB
+Plugins:
+  Network:              overlay, bridge, null, host, overlay
+  Volume:               local
+Engine Version:         17.06.0-ce
+TLS Info:
+ TrustRoot:
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy
+NDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g
+Ah8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO
+PQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3
+zONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==
+-----END CERTIFICATE-----
+
+ Issuer Public Key: MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ==
+ Issuer Subject:    MBMxETAPBgNVBAMTCHN3YXJtLWNh
+

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ls%2Findex.html new file mode 100644 index 00000000..807a2495 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ls%2Findex.html @@ -0,0 +1,58 @@ +

docker node ls


List nodes in the swarm

This command works with the Swarm orchestrator.

Usage

$ docker node ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists all the nodes that the Docker Swarm manager knows about. You can filter using the -f or --filter flag. Refer to the filtering section for more information about available filter options.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter, -f Filter output based on conditions provided
--format Pretty-print nodes using a Go template
--quiet, -q Only display IDs

Examples

$ docker node ls
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Ready   Active
+38ciaotwjuritcdtn9npbnkuz    swarm-worker1   Ready   Active
+e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader
+

Note

In the example output above, there is a hidden .Self column that indicates whether the node is the same node as the current Docker daemon. A * (e.g., e216jshn25ckzbvmwlnh5jr3g *) means this node is the current Docker daemon.
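Because .Self is also a template placeholder (see the formatting section below), the hidden column can be shown explicitly; a minimal sketch:

$ docker node ls --format "{{.Hostname}}: {{.Self}}"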

Filtering

The filtering flag (-f or --filter) format is “key=value”. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

id

The id filter matches all or part of a node’s id.

$ docker node ls -f id=1
+
+ID                         HOSTNAME       STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Ready   Active
+

label

The label filter matches nodes based on engine labels and on the presence of a label alone or a label and a value. Engine labels are configured in the daemon configuration. To filter on Swarm node labels, use node.label instead.
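As a sketch, an engine label can be set in the daemon configuration file (typically /etc/docker/daemon.json) and is picked up after a daemon restart; the foo=bar label here is only an example:

{
  "labels": ["foo=bar"]
}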

The following filter matches nodes with the foo label regardless of its value.

$ docker node ls -f "label=foo"
+
+ID                         HOSTNAME       STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Ready   Active
+

node.label

The node.label filter matches nodes based on node labels and on the presence of a node.label alone or a node.label and a value.

The following commands add a region node label to four nodes:

$ docker node update --label-add region=region-a swarm-test-01
+$ docker node update --label-add region=region-a swarm-test-02
+$ docker node update --label-add region=region-b swarm-test-03
+$ docker node update --label-add region=region-b swarm-test-04
+

Show all nodes that have a region node label set:

$ docker node ls --filter node.label=region
+
+ID                            HOSTNAME        STATUS    AVAILABILITY   MANAGER STATUS   ENGINE VERSION
+yg550ettvsjn6g6t840iaiwgb *   swarm-test-01   Ready     Active         Leader           20.10.2
+2lm9w9kbepgvkzkkeyku40e65     swarm-test-02   Ready     Active         Reachable        20.10.2
+hc0pu7ntc7s4uvj4pv7z7pz15     swarm-test-03   Ready     Active         Reachable        20.10.2
+n41b2cijmhifxxvz56vwrs12q     swarm-test-04   Ready     Active                          20.10.2
+

Show all nodes that have a region node label, with value region-a:

$ docker node ls --filter node.label=region=region-a
+
+ID                            HOSTNAME        STATUS    AVAILABILITY   MANAGER STATUS   ENGINE VERSION
+yg550ettvsjn6g6t840iaiwgb *   swarm-test-01   Ready     Active         Leader           20.10.2
+2lm9w9kbepgvkzkkeyku40e65     swarm-test-02   Ready     Active         Reachable        20.10.2
+

membership

The membership filter matches nodes based on their membership value, accepted or pending.

The following filter matches nodes with the membership of accepted.

$ docker node ls -f "membership=accepted"
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Ready   Active
+38ciaotwjuritcdtn9npbnkuz    swarm-worker1   Ready   Active
+

name

The name filter matches on all or part of a node hostname.

The following filter matches nodes with a hostname equal to swarm-manager1.

$ docker node ls -f name=swarm-manager1
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader
+

role

The role filter matches nodes based on their role, worker or manager.

The following filter matches nodes with the manager role.

$ docker node ls -f "role=manager"
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader
+

Formatting

The formatting option (--format) pretty-prints node output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Node ID
.Self Node of the daemon (true/false; true indicates that the node is the same as the current Docker daemon)
.Hostname Node hostname
.Status Node status
.Availability Node availability (“active”, “pause”, or “drain”)
.ManagerStatus Manager status of the node
.TLSStatus TLS status of the node (“Ready”, or “Needs Rotation” if the TLS certificate was signed by an old CA)
.EngineVersion Engine version

When using the --format option, the node ls command will either output the data exactly as the template declares or, when using the table directive, include column headers as well.

The following example uses a template without headers and outputs the ID, Hostname, and TLS Status entries separated by a colon (:) for all nodes:

$ docker node ls --format "{{.ID}}: {{.Hostname}} {{.TLSStatus}}"
+
+e216jshn25ckzbvmwlnh5jr3g: swarm-manager1 Ready
+35o6tiywb700jesrt3dmllaza: swarm-worker1 Needs Rotation
+
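To keep the column headers, the table directive can be combined with the same placeholders; the columns chosen here are only an illustration:

$ docker node ls --format "table {{.ID}}\t{{.Hostname}}\t{{.TLSStatus}}"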

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_promote%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_promote%2Findex.html new file mode 100644 index 00000000..f7f298cc --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_promote%2Findex.html @@ -0,0 +1,8 @@ +

docker node promote


Promote one or more nodes to manager in the swarm

This command works with the Swarm orchestrator.

Usage

$ docker node promote NODE [NODE...]
+

Description

Promotes a node to manager. This command can only be executed on a manager node.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Examples

$ docker node promote <node name>
+

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ps%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ps%2Findex.html new file mode 100644 index 00000000..d410f8d0 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_ps%2Findex.html @@ -0,0 +1,41 @@ +

docker node ps


List tasks running on one or more nodes, defaults to current node

This command works with the Swarm orchestrator.

Usage

$ docker node ps [OPTIONS] [NODE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists all the tasks on a node that Docker knows about. You can filter using the -f or --filter flag. Refer to the filtering section for more information about available filter options.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter, -f Filter output based on conditions provided
--format Pretty-print tasks using a Go template
--no-resolve Do not map IDs to Names
--no-trunc Do not truncate output
--quiet, -q Only display task IDs

Examples

$ docker node ps swarm-manager1
+
+NAME                                IMAGE        NODE            DESIRED STATE  CURRENT STATE
+redis.1.7q92v0nr1hcgts2amcjyqg3pq   redis:3.0.6  swarm-manager1  Running        Running 5 hours
+redis.6.b465edgho06e318egmgjbqo4o   redis:3.0.6  swarm-manager1  Running        Running 29 seconds
+redis.7.bg8c07zzg87di2mufeq51a2qp   redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+redis.9.dkkual96p4bb3s6b10r7coxxt   redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+redis.10.0tgctg8h8cech4w0k0gwrmr23  redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+

Filtering

The filtering flag (-f or --filter) format is “key=value”. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

name

The name filter matches on all or part of a task’s name.

The following filter matches all tasks with a name containing the redis string.

$ docker node ps -f name=redis swarm-manager1
+
+NAME                                IMAGE        NODE            DESIRED STATE  CURRENT STATE
+redis.1.7q92v0nr1hcgts2amcjyqg3pq   redis:3.0.6  swarm-manager1  Running        Running 5 hours
+redis.6.b465edgho06e318egmgjbqo4o   redis:3.0.6  swarm-manager1  Running        Running 29 seconds
+redis.7.bg8c07zzg87di2mufeq51a2qp   redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+redis.9.dkkual96p4bb3s6b10r7coxxt   redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+redis.10.0tgctg8h8cech4w0k0gwrmr23  redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+

id

The id filter matches a task’s id.

$ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1
+
+NAME                                IMAGE        NODE            DESIRED STATE  CURRENT STATE
+redis.7.bg8c07zzg87di2mufeq51a2qp   redis:3.0.6  swarm-manager1  Running        Running 5 seconds
+

label

The label filter matches tasks based on the presence of a label alone or a label and a value.

The following filter matches tasks with the usage label regardless of its value.

$ docker node ps -f "label=usage"
+
+NAME                               IMAGE        NODE            DESIRED STATE  CURRENT STATE
+redis.6.b465edgho06e318egmgjbqo4o  redis:3.0.6  swarm-manager1  Running        Running 10 minutes
+redis.7.bg8c07zzg87di2mufeq51a2qp  redis:3.0.6  swarm-manager1  Running        Running 9 minutes
+

desired-state

The desired-state filter can take the values running, shutdown, or accepted.
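For example, to show only running tasks on a node (the node name is reused from the examples above), a sketch:

$ docker node ps -f desired-state=running swarm-manager1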

Formatting

The formatting option (--format) pretty-prints task output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Task ID
.Name Task name
.Image Task image
.Node Node ID
.DesiredState Desired state of the task (running, shutdown, or accepted)
.CurrentState Current state of the task
.Error Error
.Ports Task published ports

When using the --format option, the node ps command will either output the data exactly as the template declares or, when using the table directive, include column headers as well.

The following example uses a template without headers and outputs the Name and Image entries separated by a colon (:) for all tasks:

$ docker node ps --format "{{.Name}}: {{.Image}}"
+
+top.1: busybox
+top.2: busybox
+top.3: busybox
+

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_rm%2Findex.html new file mode 100644 index 00000000..d747fce2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_rm%2Findex.html @@ -0,0 +1,19 @@ +

docker node rm


Remove one or more nodes from the swarm

This command works with the Swarm orchestrator.

Usage

$ docker node rm [OPTIONS] NODE [NODE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Removes the specified nodes from a swarm.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--force, -f Force remove a node from the swarm

Examples

Remove a stopped node from the swarm

$ docker node rm swarm-node-02
+
+Node swarm-node-02 removed from swarm
+

Attempt to remove a running node from a swarm

Removes the specified nodes from the swarm, but only if the nodes are in the down state. If you attempt to remove an active node you will receive an error:

$ docker node rm swarm-node-03
+
+Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not
+down and can't be removed
+

Forcibly remove an inaccessible node from a swarm

If you lose access to a worker node or need to shut it down because it has been compromised or is not behaving as expected, you can use the --force option. This may cause transient errors or interruptions, depending on the type of task being run on the node.

$ docker node rm --force swarm-node-03
+
+Node swarm-node-03 removed from swarm
+

A manager node must be demoted to a worker node (using docker node demote) before you can remove it from the swarm.
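A minimal sketch of that sequence, using a hypothetical swarm-manager2 node:

$ docker node demote swarm-manager2   # swarm-manager2 is a hypothetical node name
$ docker node rm swarm-manager2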

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_update%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_update%2Findex.html new file mode 100644 index 00000000..41b5deb8 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fnode_update%2Findex.html @@ -0,0 +1,10 @@ +

docker node update


Update a node

This command works with the Swarm orchestrator.

Usage

$ docker node update [OPTIONS] NODE
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Update metadata about a node, such as its availability, labels, or roles.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--availability Availability of the node ("active"|"pause"|"drain")
--label-add Add or update a node label (key=value)
--label-rm Remove a node label if it exists
--role Role of the node ("worker"|"manager")

Examples

Add label metadata to a node

Add metadata to a swarm node using node labels. You can specify a node label as a key with an empty value:

$ docker node update --label-add foo worker1
+

To add multiple labels to a node, pass the --label-add flag for each label:

$ docker node update --label-add foo --label-add bar worker1
+
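A label added this way can later be removed with the --label-rm flag, which takes only the label key (same hypothetical worker1 node):

$ docker node update --label-rm bar worker1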

When you create a service, you can use node labels as a constraint. A constraint limits the nodes where the scheduler deploys tasks for a service.

For example, to add a type label to identify nodes where the scheduler should deploy message queue service tasks:

$ docker node update --label-add type=queue worker1
+
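A sketch of using that label as a placement constraint when creating a service; the service name and image are only illustrative:

$ docker service create --name queue-worker --constraint 'node.labels.type == queue' redis:3.0.6   # queue-worker is a hypothetical service name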

The labels you set for nodes using docker node update apply only to the node entity within the swarm. Do not confuse them with the docker daemon labels for dockerd.

For more information about labels, refer to apply custom metadata.

Parent command

Command Description
docker node Manage Swarm nodes
Command Description
docker node demote Demote one or more nodes from manager in the swarm
docker node inspect Display detailed information on one or more nodes
docker node ls List nodes in the swarm
docker node promote Promote one or more nodes to manager in the swarm
docker node ps List tasks running on one or more nodes, defaults to current node
docker node rm Remove one or more nodes from the swarm
docker node update Update a node
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fpause%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fpause%2Findex.html new file mode 100644 index 00000000..27a21f98 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fpause%2Findex.html @@ -0,0 +1,8 @@ +

docker pause


Pause all processes within one or more containers

Usage

$ docker pause CONTAINER [CONTAINER...]
+

Description

The docker pause command suspends all processes in the specified containers. On Linux, this uses the freezer cgroup. Traditionally, when suspending a process the SIGSTOP signal is used, which is observable by the process being suspended. With the freezer cgroup, the process is unaware that it is being suspended (and subsequently resumed) and has no way to observe or intercept it. On Windows, only Hyper-V containers can be paused.

See the freezer cgroup documentation for further details.

For example uses of this command, refer to the examples section below.

Examples

$ docker pause my_container
+
+
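To resume the suspended processes, use the companion docker unpause command on the same container:

$ docker unpause my_container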


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin%2Findex.html new file mode 100644 index 00000000..c85edb41 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin%2Findex.html @@ -0,0 +1,7 @@ +

docker plugin


Manage plugins

Usage

$ docker plugin COMMAND
+

Description

Manage plugins.

Child commands

Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_create%2Findex.html new file mode 100644 index 00000000..2d445651 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_create%2Findex.html @@ -0,0 +1,21 @@ +

docker plugin create


Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.

Usage

$ docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a plugin. Before creating the plugin, prepare the plugin’s root filesystem as well as the config.json file.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--compress Compress the context using gzip

Examples

The following example shows how to create a sample plugin.

$ ls -ls /home/pluginDir
+
+total 4
+4 -rw-r--r--  1 root root 431 Nov  7 01:40 config.json
+0 drwxr-xr-x 19 root root 420 Nov  7 01:40 rootfs
+
+$ docker plugin create plugin /home/pluginDir
+
+plugin
+
+$ docker plugin ls
+
+ID              NAME            DESCRIPTION                  ENABLED
+672d8144ec02    plugin:latest   A sample plugin for Docker   false
+

The plugin can subsequently be enabled for local use or pushed to the public registry.
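For example, to enable the newly created plugin locally, using the name from the listing above:

$ docker plugin enable plugin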

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_disable%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_disable%2Findex.html new file mode 100644 index 00000000..682a8991 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_disable%2Findex.html @@ -0,0 +1,21 @@ +

docker plugin disable


Disable a plugin

Usage

$ docker plugin disable [OPTIONS] PLUGIN
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Disables a plugin. The plugin must be installed before it can be disabled; see docker plugin install. Without the -f option, a plugin that has references (e.g., volumes, networks) cannot be disabled.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--force, -f Force the disable of an active plugin

Examples

The following example shows that the sample-volume-plugin plugin is installed and enabled:

$ docker plugin ls
+
+ID            NAME                                    DESCRIPTION                ENABLED
+69553ca1d123  tiborvass/sample-volume-plugin:latest   A test plugin for Docker   true
+

To disable the plugin, use the following command:

$ docker plugin disable tiborvass/sample-volume-plugin
+
+tiborvass/sample-volume-plugin
+
+$ docker plugin ls
+
+ID            NAME                                    DESCRIPTION                ENABLED
+69553ca1d123  tiborvass/sample-volume-plugin:latest   A test plugin for Docker   false
+
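If the plugin were still referenced by volumes or networks, the --force flag from the options above could be used; a sketch, not needed for this sample plugin:

$ docker plugin disable -f tiborvass/sample-volume-plugin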

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_enable%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_enable%2Findex.html new file mode 100644 index 00000000..c8f5e727 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_enable%2Findex.html @@ -0,0 +1,19 @@ +

docker plugin enable


Enable a plugin

Usage

$ docker plugin enable [OPTIONS] PLUGIN
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Enables a plugin. The plugin must be installed before it can be enabled; see docker plugin install.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--timeout 30 HTTP client timeout (in seconds)

Examples

The following example shows that the sample-volume-plugin plugin is installed, but disabled:

$ docker plugin ls
+
+ID            NAME                                    DESCRIPTION                ENABLED
+69553ca1d123  tiborvass/sample-volume-plugin:latest   A test plugin for Docker   false
+

To enable the plugin, use the following command:

$ docker plugin enable tiborvass/sample-volume-plugin
+
+tiborvass/sample-volume-plugin
+
+$ docker plugin ls
+
+ID            NAME                                    DESCRIPTION                ENABLED
+69553ca1d123  tiborvass/sample-volume-plugin:latest   A test plugin for Docker   true
+

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_inspect%2Findex.html new file mode 100644 index 00000000..60c7b0eb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_inspect%2Findex.html @@ -0,0 +1,118 @@ +

docker plugin inspect


Display detailed information on one or more plugins

Usage

$ docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Returns information about a plugin. By default, this command renders all results in a JSON array.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format, -f Format the output using the given Go template

Examples

Inspect a plugin

The following example inspects the tiborvass/sample-volume-plugin plugin:

$ docker plugin inspect tiborvass/sample-volume-plugin:latest
+

Output is in JSON format (output below is formatted for readability):

{
+  "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21",
+  "Name": "tiborvass/sample-volume-plugin:latest",
+  "PluginReference": "tiborvas/sample-volume-plugin:latest",
+  "Enabled": true,
+  "Config": {
+    "Mounts": [
+      {
+        "Name": "",
+        "Description": "",
+        "Settable": null,
+        "Source": "/data",
+        "Destination": "/data",
+        "Type": "bind",
+        "Options": [
+          "shared",
+          "rbind"
+        ]
+      },
+      {
+        "Name": "",
+        "Description": "",
+        "Settable": null,
+        "Source": null,
+        "Destination": "/foobar",
+        "Type": "tmpfs",
+        "Options": null
+      }
+    ],
+    "Env": [
+      "DEBUG=1"
+    ],
+    "Args": null,
+    "Devices": null
+  },
+  "Manifest": {
+    "ManifestVersion": "v0",
+    "Description": "A test plugin for Docker",
+    "Documentation": "https://docs.docker.com/engine/extend/plugins/",
+    "Interface": {
+      "Types": [
+        "docker.volumedriver/1.0"
+      ],
+      "Socket": "plugins.sock"
+    },
+    "Entrypoint": [
+      "plugin-sample-volume-plugin",
+      "/data"
+    ],
+    "Workdir": "",
+    "User": {
+    },
+    "Network": {
+      "Type": "host"
+    },
+    "Capabilities": null,
+    "Mounts": [
+      {
+        "Name": "",
+        "Description": "",
+        "Settable": null,
+        "Source": "/data",
+        "Destination": "/data",
+        "Type": "bind",
+        "Options": [
+          "shared",
+          "rbind"
+        ]
+      },
+      {
+        "Name": "",
+        "Description": "",
+        "Settable": null,
+        "Source": null,
+        "Destination": "/foobar",
+        "Type": "tmpfs",
+        "Options": null
+      }
+    ],
+    "Devices": [
+      {
+        "Name": "device",
+        "Description": "a host device to mount",
+        "Settable": null,
+        "Path": "/dev/cpu_dma_latency"
+      }
+    ],
+    "Env": [
+      {
+        "Name": "DEBUG",
+        "Description": "If set, prints debug messages",
+        "Settable": null,
+        "Value": "1"
+      }
+    ],
+    "Args": {
+      "Name": "args",
+      "Description": "command line arguments",
+      "Settable": null,
+      "Value": [
+
+      ]
+    }
+  }
+}
+

Formatting the output

$ docker plugin inspect -f '{{.Id}}' tiborvass/sample-volume-plugin:latest
+
+8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21
+

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_install%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_install%2Findex.html new file mode 100644 index 00000000..8ea4c487 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_install%2Findex.html @@ -0,0 +1,19 @@ +

docker plugin install


Install a plugin

Usage

$ docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Installs and enables a plugin. Docker looks first for the plugin on your Docker host. If the plugin does not exist locally, then the plugin is pulled from the registry. Note that the minimum required registry version to distribute plugins is 2.3.0.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--alias Local name for plugin
--disable Do not enable the plugin on install
--disable-content-trust true Skip image verification
--grant-all-permissions Grant all permissions necessary to run the plugin

Examples

The following example installs the vieux/sshfs plugin and sets its DEBUG environment variable to 1. Installing the plugin pulls it from Docker Hub, prompts the user to accept the list of privileges that the plugin needs, sets the plugin’s parameters, and enables the plugin.

$ docker plugin install vieux/sshfs DEBUG=1
+
+Plugin "vieux/sshfs" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions? [y/N] y
+vieux/sshfs
+

After the plugin is installed, it appears in the list of plugins:

$ docker plugin ls
+
+ID             NAME                  DESCRIPTION                ENABLED
+69553ca1d123   vieux/sshfs:latest    sshFS plugin for Docker    true
+
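To skip the interactive privilege prompt, for example in scripts, the --grant-all-permissions flag from the options table can be combined with the same install; a sketch:

$ docker plugin install --grant-all-permissions vieux/sshfs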

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_ls%2Findex.html new file mode 100644 index 00000000..225f90a0 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_ls%2Findex.html @@ -0,0 +1,25 @@ +

docker plugin ls


List plugins

Usage

$ docker plugin ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists all the plugins that are currently installed. You can install plugins using the docker plugin install command. You can also filter using the -f or --filter flag. Refer to the filtering section for more information about available filter options.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter, -f Provide filter values (e.g. 'enabled=true')
--format Pretty-print plugins using a Go template
--no-trunc Don't truncate output
--quiet, -q Only display plugin IDs

Examples

$ docker plugin ls
+
+ID            NAME                                    DESCRIPTION                ENABLED
+69553ca1d123  tiborvass/sample-volume-plugin:latest   A test plugin for Docker   true
+

Filtering

The filtering flag (-f or --filter) format is “key=value”. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

enabled

The enabled filter matches plugins based on whether they are enabled (true or false).

capability

The capability filter matches on plugin capabilities. One plugin might have multiple capabilities. Currently volumedriver, networkdriver, ipamdriver, logdriver, metricscollector, and authz are supported capabilities.
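As a sketch, the capability filter can be combined with one of the supported values:

$ docker plugin ls --filter capability=volumedriver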

$ docker plugin install --disable vieux/sshfs
+
+Installed plugin vieux/sshfs
+
+$ docker plugin ls --filter enabled=true
+
+ID                  NAME                DESCRIPTION         ENABLED
+

Formatting

The formatting option (--format) pretty-prints plugin output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Plugin ID
.Name Plugin name and tag
.Description Plugin description
.Enabled Whether plugin is enabled or not
.PluginReference The reference used to push/pull from a registry

When using the --format option, the plugin ls command will either output the data exactly as the template declares or, when using the table directive, include column headers as well.

The following example uses a template without headers and outputs the ID and Name entries separated by a colon (:) for all plugins:

$ docker plugin ls --format "{{.ID}}: {{.Name}}"
+
+4be01827a72e: vieux/sshfs:latest
+
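
To include column headers, you could use the table directive with the same placeholders (a sketch; the values shown are illustrative):

$ docker plugin ls --format "table {{.ID}}\t{{.Name}}\t{{.Enabled}}"

ID             NAME                 ENABLED
4be01827a72e   vieux/sshfs:latest   true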

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/plugin_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_push%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_push%2Findex.html new file mode 100644 index 00000000..ff857723 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_push%2Findex.html @@ -0,0 +1,13 @@ +

docker plugin push


Push a plugin to a registry

Usage

$ docker plugin push [OPTIONS] PLUGIN[:TAG]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

After you have created a plugin using docker plugin create and the plugin is ready for distribution, use docker plugin push to share it with Docker Hub or a self-hosted registry.

Registry credentials are managed by docker login.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--disable-content-trust true Skip image signing

Examples

The following example shows how to push a sample user/plugin.

$ docker plugin ls
+
+ID             NAME                    DESCRIPTION                  ENABLED
+69553ca1d456   user/plugin:latest      A sample plugin for Docker   false
+
+$ docker plugin push user/plugin
+
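
To push to a self-hosted registry, the plugin name must include the registry prefix, and you must be logged in to that registry. A minimal sketch, assuming a registry at registry-host:5000 and a plugin data directory ./plugin-data containing config.json and a rootfs directory (both placeholders):

$ docker login registry-host:5000
$ docker plugin create registry-host:5000/user/plugin ./plugin-data
$ docker plugin push registry-host:5000/user/plugin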

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/plugin_push/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_rm%2Findex.html new file mode 100644 index 00000000..5475811b --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_rm%2Findex.html @@ -0,0 +1,16 @@ +

docker plugin rm


Remove one or more plugins

Usage

$ docker plugin rm [OPTIONS] PLUGIN [PLUGIN...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Removes a plugin. You cannot remove a plugin while it is enabled; disable it first with docker plugin disable before removing it. Alternatively, use --force; forcing removal is not recommended, since it can affect the functioning of running containers that use the plugin.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--force , -f + Force the removal of an active plugin

Examples

The following example disables and removes the sample-volume-plugin:latest plugin:

$ docker plugin disable tiborvass/sample-volume-plugin
+
+tiborvass/sample-volume-plugin
+
+$ docker plugin rm tiborvass/sample-volume-plugin:latest
+
+tiborvass/sample-volume-plugin
+
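
If the plugin is still enabled, you could force its removal instead, although this is not recommended. A sketch using the same plugin:

$ docker plugin rm --force tiborvass/sample-volume-plugin:latest

tiborvass/sample-volume-plugin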

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/plugin_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_set%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_set%2Findex.html new file mode 100644 index 00000000..0476182a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_set%2Findex.html @@ -0,0 +1,39 @@ +

docker plugin set


Change settings for a plugin

Usage

$ docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...]
+

Description

Change settings for a plugin. The plugin must be disabled.

The settings currently supported are environment variables, the source of mounts, the path of devices, and args, as shown in the examples below.

For example uses of this command, refer to the examples section below.

Examples

Change an environment variable

The following example changes the DEBUG environment variable on the sample-volume-plugin plugin.

$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin
+[DEBUG=0]
+
+$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1
+
+$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin
+[DEBUG=1]
+

Change the source of a mount

The following example changes the source of the mymount mount on the myplugin plugin.

$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/foo
+
+$ docker plugin set myplugin mymount.source=/bar
+
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/bar
+

Note

Since source is the only settable field of mymount, docker plugin set myplugin mymount=/bar would work too.

Change a device path

The following example changes the path of the mydevice device on the myplugin plugin.

$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+
+/dev/foo
+
+$ docker plugin set myplugin mydevice.path=/dev/bar
+
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+
+/dev/bar
+

Note Since path is the only settable field of mydevice, docker plugin set myplugin mydevice=/dev/bar would work too.

Change the arguments

The following example changes the value of the args on the myplugin plugin.

$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
+
+["foo", "bar"]
+
+$ docker plugin set myplugin myargs="foo bar baz"
+
+$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
+
+["foo", "bar", "baz"]
+

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/plugin_set/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_upgrade%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_upgrade%2Findex.html new file mode 100644 index 00000000..cdc99b44 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fplugin_upgrade%2Findex.html @@ -0,0 +1,52 @@ +

docker plugin upgrade


Upgrade an existing plugin

Usage

$ docker plugin upgrade [OPTIONS] PLUGIN [REMOTE]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Upgrades an existing plugin to the specified remote plugin image. If no remote is specified, Docker will re-pull the current image and use the updated version. All existing references to the plugin will continue to work. The plugin must be disabled before running the upgrade.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--disable-content-trust true Skip image verification
--grant-all-permissions Grant all permissions necessary to run the plugin
--skip-remote-check Do not check if specified remote plugin matches existing plugin image

Examples

The following example installs the vieux/sshfs plugin, uses it to create and use a volume, then upgrades the plugin.

$ docker plugin install vieux/sshfs DEBUG=1
+
+Plugin "vieux/sshfs:next" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions? [y/N] y
+vieux/sshfs:next
+
+$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume
+
+sshvolume
+
+$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello"
+
+$ docker plugin disable -f vieux/sshfs:next
+
+vieux/sshfs:next
+
+# Here docker volume ls doesn't show 'sshvolume', since the plugin is disabled
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+
+$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next
+
+Plugin "vieux/sshfs:next" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions? [y/N] y
+Upgrade plugin vieux/sshfs:next to vieux/sshfs:next
+
+$ docker plugin enable vieux/sshfs:next
+
+vieux/sshfs:next
+
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+vieux/sshfs:next    sshvolume
+
+$ docker run -it -v sshvolume:/data alpine sh -c "ls /data"
+
+hello
+

Parent command

Command Description
docker plugin Manage plugins
Command Description
docker plugin create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.
docker plugin disable Disable a plugin
docker plugin enable Enable a plugin
docker plugin inspect Display detailed information on one or more plugins
docker plugin install Install a plugin
docker plugin ls List plugins
docker plugin push Push a plugin to a registry
docker plugin rm Remove one or more plugins
docker plugin set Change settings for a plugin
docker plugin upgrade Upgrade an existing plugin
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/plugin_upgrade/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fport%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fport%2Findex.html new file mode 100644 index 00000000..122a0db5 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fport%2Findex.html @@ -0,0 +1,28 @@ +

docker port


List port mappings or a specific mapping for the container

Usage

$ docker port CONTAINER [PRIVATE_PORT[/PROTO]]
+

For example uses of this command, refer to the examples section below.

Examples

Show all mapped ports

You can list all mapped ports by not specifying a PRIVATE_PORT, or request just a specific mapping:

$ docker ps
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                                            NAMES
+b650456536c7        busybox:latest      top                 54 minutes ago      Up 54 minutes       0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp   test
+
+$ docker port test
+
+7890/tcp -> 0.0.0.0:4321
+9876/tcp -> 0.0.0.0:1234
+
+$ docker port test 7890/tcp
+
+0.0.0.0:4321
+
+$ docker port test 7890/udp
+
+2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
+
+$ docker port test 7890
+
+0.0.0.0:4321
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/port/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fps%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fps%2Findex.html new file mode 100644 index 00000000..72e25f21 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fps%2Findex.html @@ -0,0 +1,172 @@ +

docker ps


List containers

Usage

$ docker ps [OPTIONS]
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--all , -a + Show all containers (default shows just running)
+--filter , -f + Filter output based on conditions provided
--format Pretty-print containers using a Go template
+--last , -n + -1 Show n last created containers (includes all states)
+--latest , -l + Show the latest created container (includes all states)
--no-trunc Don't truncate output
+--quiet , -q + Only display container IDs
+--size , -s + Display total file sizes

Examples

Prevent truncating output

The following example shows two linked containers. Add --no-trunc to prevent docker ps from truncating output such as the COMMAND column:

$ docker ps
+
+CONTAINER ID        IMAGE                        COMMAND                CREATED              STATUS              PORTS               NAMES
+4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds       3300-3310/tcp       webapp
+d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago       Up 33 minutes       6379/tcp            redis,webapp/db
+

Show both running and stopped containers

The docker ps command only shows running containers by default. To see all containers, use the -a (or --all) flag:

$ docker ps -a
+

docker ps groups exposed ports into a single range if possible. E.g., a container that exposes TCP ports 100, 101, 102 displays 100-102/tcp in the PORTS column.
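
A quick way to see this grouping in action (a sketch; the container name port-demo is arbitrary and the output is illustrative):

$ docker run -d --name port-demo --expose 100 --expose 101 --expose 102 busybox top
$ docker ps --filter name=port-demo --format "table {{.Names}}\t{{.Ports}}"

NAMES       PORTS
port-demo   100-102/tcp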

Show disk usage by container

The docker ps -s command displays two different on-disk sizes for each container: the size of the data in the container’s writable layer, and the virtual size, which also includes the read-only image data used by the container:

$ docker ps -s
+
+CONTAINER ID   IMAGE          COMMAND                  CREATED        STATUS       PORTS   NAMES        SIZE
+e90b8831a4b8   nginx          "/bin/bash -c 'mkdir "   11 weeks ago   Up 4 hours           my_nginx     35.58 kB (virtual 109.2 MB)
+00c6131c5e30   telegraf:1.5   "/entrypoint.sh"         11 weeks ago   Up 11 weeks          my_telegraf  0 B (virtual 209.5 MB)
+

For more information, refer to the container size on disk section.

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

Filter Description
id Container’s ID
name Container’s name
label An arbitrary string representing either a key or a key-value pair. Expressed as <key> or <key>=<value> +
exited An integer representing the container’s exit code. Only useful with --all.
status One of created, restarting, running, removing, paused, exited, or dead +
ancestor Filters containers which share a given image as an ancestor. Expressed as <image-name>[:<tag>], <image id>, or <image@digest> +
+before or since + Filters containers created before or after a given container ID or name
volume Filters running containers which have mounted a given volume or bind mount.
network Filters running containers connected to a given network.
+publish or expose + Filters containers which publish or expose a given port. Expressed as <port>[/<proto>] or <startport-endport>/[<proto>] +
health Filters containers based on their healthcheck status. One of starting, healthy, unhealthy or none.
isolation Windows daemon only. One of default, process, or hyperv.
is-task Filters containers that are a “task” for a service. Boolean option (true or false)

label

The label filter matches containers based on the presence of a label alone or a label and a value.

The following filter matches containers with the color label regardless of its value.

$ docker ps --filter "label=color"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+673394ef1d4c        busybox             "top"               47 seconds ago      Up 45 seconds                           nostalgic_shockley
+d85756f57265        busybox             "top"               52 seconds ago      Up 51 seconds                           high_albattani
+

The following filter matches containers with the color label with the blue value.

$ docker ps --filter "label=color=blue"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
+d85756f57265        busybox             "top"               About a minute ago   Up About a minute                       high_albattani
+

name

The name filter matches on all or part of a container’s name.

The following filter matches all containers with a name containing the nostalgic_stallman string.

$ docker ps --filter "name=nostalgic_stallman"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+9b6247364a03        busybox             "top"               2 minutes ago       Up 2 minutes                            nostalgic_stallman
+

You can also filter for a substring in a name as this shows:

$ docker ps --filter "name=nostalgic"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+715ebfcee040        busybox             "top"               3 seconds ago       Up 1 second                             i_am_nostalgic
+9b6247364a03        busybox             "top"               7 minutes ago       Up 7 minutes                            nostalgic_stallman
+673394ef1d4c        busybox             "top"               38 minutes ago      Up 38 minutes                           nostalgic_shockley
+

exited

The exited filter matches containers by exit status code. For example, to filter for containers that have exited successfully:

$ docker ps -a --filter 'exited=0'
+
+CONTAINER ID        IMAGE             COMMAND                CREATED             STATUS                   PORTS                      NAMES
+ea09c3c82f6e        registry:latest   /srv/run.sh            2 weeks ago         Exited (0) 2 weeks ago   127.0.0.1:5000->5000/tcp   desperate_leakey
+106ea823fe4e        fedora:latest     /bin/sh -c 'bash -l'   2 weeks ago         Exited (0) 2 weeks ago                              determined_albattani
+48ee228c9464        fedora:20         bash                   2 weeks ago         Exited (0) 2 weeks ago                              tender_torvalds
+

Filter by exit signal

You can use a filter to locate containers that exited with a status of 137, meaning they were killed by SIGKILL(9).

$ docker ps -a --filter 'exited=137'
+
+CONTAINER ID        IMAGE               COMMAND                CREATED             STATUS                       PORTS               NAMES
+b3e1c0ed5bfe        ubuntu:latest       "sleep 1000"           12 seconds ago      Exited (137) 5 seconds ago                       grave_kowalevski
+a2eb5558d669        redis:latest        "/entrypoint.sh redi   2 hours ago         Exited (137) 2 hours ago                         sharp_lalande
+

Any of these events result in a 137 status: the container’s init process is killed manually, docker kill kills the container, or the Docker daemon restarts, which kills all running containers.

status

The status filter matches containers by status. You can filter using created, restarting, running, removing, paused, exited and dead. For example, to filter for running containers:

$ docker ps --filter status=running
+
+CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS               NAMES
+715ebfcee040        busybox                "top"               16 minutes ago      Up 16 minutes                           i_am_nostalgic
+d5c976d3c462        busybox                "top"               23 minutes ago      Up 23 minutes                           top
+9b6247364a03        busybox                "top"               24 minutes ago      Up 24 minutes                           nostalgic_stallman
+

To filter for paused containers:

$ docker ps --filter status=paused
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS                      PORTS               NAMES
+673394ef1d4c        busybox             "top"               About an hour ago   Up About an hour (Paused)                       nostalgic_shockley
+

ancestor

The ancestor filter matches containers based on their image or a descendant of it. The filter supports the following image representations: image, image:tag, image:tag@digest, short-id, and full-id.

If you don’t specify a tag, the latest tag is used. For example, to filter for containers that use the latest ubuntu image:

$ docker ps --filter ancestor=ubuntu
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
+919e1179bdb8        ubuntu-c1           "top"               About a minute ago   Up About a minute                       admiring_lovelace
+5d1e4a540723        ubuntu-c2           "top"               About a minute ago   Up About a minute                       admiring_sammet
+82a598284012        ubuntu              "top"               3 minutes ago        Up 3 minutes                            sleepy_bose
+bab2a34ba363        ubuntu              "top"               3 minutes ago        Up 3 minutes                            focused_yonath
+

Match containers based on the ubuntu-c1 image which, in this case, is a child of ubuntu:

$ docker ps --filter ancestor=ubuntu-c1
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
+919e1179bdb8        ubuntu-c1           "top"               About a minute ago   Up About a minute                       admiring_lovelace
+

Match containers based on the ubuntu version 12.04.5 image:

$ docker ps --filter ancestor=ubuntu:12.04.5
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
+82a598284012        ubuntu:12.04.5      "top"               3 minutes ago        Up 3 minutes                            sleepy_bose
+

The following matches containers based on the layer d0e008c6cf02 or an image that has this layer in its layer stack.

$ docker ps --filter ancestor=d0e008c6cf02
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
+82a598284012        ubuntu:12.04.5      "top"               3 minutes ago        Up 3 minutes                            sleepy_bose
+

Create time

before

The before filter shows only containers created before the container with a given ID or name. For example, given these containers:

$ docker ps
+
+CONTAINER ID        IMAGE       COMMAND       CREATED              STATUS              PORTS              NAMES
+9c3527ed70ce        busybox     "top"         14 seconds ago       Up 15 seconds                          desperate_dubinsky
+4aace5031105        busybox     "top"         48 seconds ago       Up 49 seconds                          focused_hamilton
+6e63f6ff38b0        busybox     "top"         About a minute ago   Up About a minute                      distracted_fermat
+

Filtering with before would give:

$ docker ps -f before=9c3527ed70ce
+
+CONTAINER ID        IMAGE       COMMAND       CREATED              STATUS              PORTS              NAMES
+4aace5031105        busybox     "top"         About a minute ago   Up About a minute                      focused_hamilton
+6e63f6ff38b0        busybox     "top"         About a minute ago   Up About a minute                      distracted_fermat
+
since

The since filter shows only containers created since the container with a given ID or name. For example, with the same containers as in the before filter:

$ docker ps -f since=6e63f6ff38b0
+
+CONTAINER ID        IMAGE       COMMAND       CREATED             STATUS              PORTS               NAMES
+9c3527ed70ce        busybox     "top"         10 minutes ago      Up 10 minutes                           desperate_dubinsky
+4aace5031105        busybox     "top"         10 minutes ago      Up 10 minutes                           focused_hamilton
+

volume

The volume filter shows only containers that mount a specific volume or have a volume mounted in a specific path:

$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}"
+
+CONTAINER ID        MOUNTS
+9c3527ed70ce        remote-volume
+
+$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}"
+
+CONTAINER ID        MOUNTS
+9c3527ed70ce        remote-volume
+

network

The network filter shows only containers that are connected to a network with a given name or id.

The following filter matches all containers that are connected to a network with a name containing net1.

$ docker run -d --net=net1 --name=test1 ubuntu top
+$ docker run -d --net=net2 --name=test2 ubuntu top
+
+$ docker ps --filter network=net1
+
+CONTAINER ID        IMAGE       COMMAND       CREATED             STATUS              PORTS               NAMES
+9d4893ed80fe        ubuntu      "top"         10 minutes ago      Up 10 minutes                           test1
+

The network filter matches on both the network’s name and ID. The following example shows all containers that are attached to the net1 network, using the network ID as a filter:

$ docker network inspect --format "{{.ID}}" net1
+
+8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5
+
+$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5
+
+CONTAINER ID        IMAGE       COMMAND       CREATED             STATUS              PORTS               NAMES
+9d4893ed80fe        ubuntu      "top"         10 minutes ago      Up 10 minutes                           test1
+

publish and expose

The publish and expose filters show only containers that have published or exposed a port with a given port number, port range, and/or protocol. The default protocol is tcp when not specified.

The following filter matches all containers that have published port 80:

$ docker run -d --publish=80 busybox top
+$ docker run -d --expose=8080 busybox top
+
+$ docker ps -a
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                   NAMES
+9833437217a5        busybox             "top"               5 seconds ago       Up 4 seconds        8080/tcp                dreamy_mccarthy
+fc7e477723b7        busybox             "top"               50 seconds ago      Up 50 seconds       0.0.0.0:32768->80/tcp   admiring_roentgen
+
+$ docker ps --filter publish=80
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS                   NAMES
+fc7e477723b7        busybox             "top"               About a minute ago   Up About a minute   0.0.0.0:32768->80/tcp   admiring_roentgen
+

The following filter matches all containers that have exposed a TCP port in the range 8000-8080:

$ docker ps --filter expose=8000-8080/tcp
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+9833437217a5        busybox             "top"               21 seconds ago      Up 19 seconds       8080/tcp            dreamy_mccarthy
+

The following filter matches all containers that have published UDP port 80:

$ docker ps --filter publish=80/udp
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+

Formatting

The formatting option (--format) pretty-prints container output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Container ID
.Image Image ID
.Command Quoted command
.CreatedAt Time when the container was created.
.RunningFor Elapsed time since the container was started.
.Ports Exposed ports.
.State Container status (for example, “created”, “running”, “exited”).
.Status Container status with details about duration and health-status.
.Size Container disk size.
.Names Container names.
.Labels All labels assigned to the container.
.Label Value of a specific label for this container. For example '{{.Label "com.docker.swarm.cpu"}}' +
.Mounts Names of the volumes mounted in this container.
.Networks Names of the networks attached to this container.

When using the --format option, the ps command will either output the data exactly as the template declares or, when using the table directive, include column headers as well.

The following example uses a template without headers and outputs the ID and Command entries separated by a colon (:) for all running containers:

$ docker ps --format "{{.ID}}: {{.Command}}"
+
+a87ecb4f327c: /bin/sh -c #(nop) MA
+01946d9d34d8: /bin/sh -c #(nop) MA
+c1d3b0166030: /bin/sh -c yum -y up
+41d50ecd2f57: /bin/sh -c #(nop) MA
+

To list all running containers with their labels in a table format you can use:

$ docker ps --format "table {{.ID}}\t{{.Labels}}"
+
+CONTAINER ID        LABELS
+a87ecb4f327c        com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
+01946d9d34d8
+c1d3b0166030        com.docker.swarm.node=debian,com.docker.swarm.cpu=6
+41d50ecd2f57        com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/ps/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fpull%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fpull%2Findex.html new file mode 100644 index 00000000..72672345 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fpull%2Findex.html @@ -0,0 +1,72 @@ +

docker pull


Pull an image or a repository from a registry

Usage

$ docker pull [OPTIONS] NAME[:TAG|@DIGEST]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Most of your images will be created on top of a base image from the Docker Hub registry.

Docker Hub contains many pre-built images that you can pull and try without needing to define and configure your own.

To download a particular image, or set of images (i.e., a repository), use docker pull.

Proxy configuration

If you are behind an HTTP proxy server, for example in corporate settings, you may need to configure the Docker daemon’s proxy settings before it can open a connection to the registry, using the HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables. To set these environment variables on a host using systemd, refer to control and configure Docker with systemd.
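
A minimal sketch of such a systemd drop-in, assuming a proxy reachable at proxy.example.com:3128 (a placeholder host):

# /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"
Environment="HTTPS_PROXY=http://proxy.example.com:3128"
Environment="NO_PROXY=localhost,127.0.0.1"

$ sudo systemctl daemon-reload
$ sudo systemctl restart docker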

Concurrent downloads

By default the Docker daemon will pull three layers of an image at a time. If you are on a low bandwidth connection this may cause timeout issues and you may want to lower this via the --max-concurrent-downloads daemon option. See the daemon documentation for more details.
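
For example, to limit pulls to one layer at a time, you could set the option in /etc/docker/daemon.json and restart the daemon (a sketch; pick a value that suits your connection):

{
  "max-concurrent-downloads": 1
}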

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--all-tags , -a + Download all tagged images in the repository
--disable-content-trust true Skip image verification
--platform Set platform if server is multi-platform capable
+--quiet , -q + Suppress verbose output

Examples

Pull an image from Docker Hub

To download a particular image, or set of images (i.e., a repository), use docker pull. If no tag is provided, Docker Engine uses the :latest tag as a default. This command pulls the debian:latest image:

$ docker pull debian
+
+Using default tag: latest
+latest: Pulling from library/debian
+fdd5d7827f33: Pull complete
+a3ed95caeb02: Pull complete
+Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa
+Status: Downloaded newer image for debian:latest
+

Docker images can consist of multiple layers. In the example above, the image consists of two layers; fdd5d7827f33 and a3ed95caeb02.

Layers can be reused by images. For example, the debian:jessie image shares both layers with debian:latest. Pulling the debian:jessie image therefore only pulls its metadata, but not its layers, because all layers are already present locally:

$ docker pull debian:jessie
+
+jessie: Pulling from library/debian
+fdd5d7827f33: Already exists
+a3ed95caeb02: Already exists
+Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e
+Status: Downloaded newer image for debian:jessie
+

To see which images are present locally, use the docker images command:

$ docker images
+
+REPOSITORY   TAG      IMAGE ID        CREATED      SIZE
+debian       jessie   f50f9524513f    5 days ago   125.1 MB
+debian       latest   f50f9524513f    5 days ago   125.1 MB
+

Docker uses a content-addressable image store, and the image ID is a SHA256 digest covering the image’s configuration and layers. In the example above, debian:jessie and debian:latest have the same image ID because they are actually the same image tagged with different names. Because they are the same image, their layers are stored only once and do not consume extra disk space.

For more information about images, layers, and the content-addressable store, refer to understand images, containers, and storage drivers.

Pull an image by digest (immutable identifier)

So far, you’ve pulled images by their name (and “tag”). Using names and tags is a convenient way to work with images. When using tags, you can docker pull an image again to make sure you have the most up-to-date version of that image. For example, docker pull ubuntu:20.04 pulls the latest version of the Ubuntu 20.04 image.

In some cases you don’t want images to be updated to newer versions, but prefer to use a fixed version of an image. Docker enables you to pull an image by its digest. When pulling an image by digest, you specify exactly which version of an image to pull. Doing so allows you to “pin” an image to that version, and guarantees that the image you’re using is always the same.

To know the digest of an image, pull the image first. Let’s pull the latest ubuntu:20.04 image from Docker Hub:

$ docker pull ubuntu:20.04
+
+20.04: Pulling from library/ubuntu
+16ec32c2132b: Pull complete
+Digest: sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+Status: Downloaded newer image for ubuntu:20.04
+docker.io/library/ubuntu:20.04
+

Docker prints the digest of the image after the pull has finished. In the example above, the digest of the image is:

sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+

Docker also prints the digest of an image when pushing to a registry. This may be useful if you want to pin to a version of the image you just pushed.

A digest takes the place of the tag when pulling an image, for example, to pull the above image by digest, run the following command:

$ docker pull ubuntu@sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+
+docker.io/library/ubuntu@sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3: Pulling from library/ubuntu
+Digest: sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+Status: Image is up to date for ubuntu@sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+docker.io/library/ubuntu@sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+

A digest can also be used in the FROM instruction of a Dockerfile, for example:

FROM ubuntu@sha256:82becede498899ec668628e7cb0ad87b6e1c371cb8a1e597d83a47fac21d6af3
+LABEL org.opencontainers.image.authors="some maintainer <maintainer@example.com>"
+

Note

Using this feature “pins” an image to a specific version in time. Docker will therefore not pull updated versions of an image, which may include security updates. If you want to pull an updated image, you need to change the digest accordingly.

Pull from a different registry

By default, docker pull pulls images from Docker Hub. It is also possible to manually specify the path of a registry to pull from. For example, if you have set up a local registry, you can specify its path to pull from it. A registry path is similar to a URL, but does not contain a protocol specifier (https://).

The following command pulls the testing/test-image image from a local registry listening on port 5000 (myregistry.local:5000):

$ docker pull myregistry.local:5000/testing/test-image
+

Registry credentials are managed by docker login.

Docker uses the https:// protocol to communicate with a registry, unless the registry is allowed to be accessed over an insecure connection. Refer to the insecure registries section for more information.

Pull a repository with multiple images

By default, docker pull pulls a single image from the registry. A repository can contain multiple images. To pull all images from a repository, provide the -a (or --all-tags) option when using docker pull.

This command pulls all images from the fedora repository:

$ docker pull --all-tags fedora
+
+Pulling repository fedora
+ad57ef8d78d7: Download complete
+105182bb5e8b: Download complete
+511136ea3c5a: Download complete
+73bd853d2ea5: Download complete
+....
+
+Status: Downloaded newer image for fedora
+

After the pull has completed use the docker images command to see the images that were pulled. The example below shows all the fedora images that are present locally:

$ docker images fedora
+
+REPOSITORY   TAG         IMAGE ID        CREATED      SIZE
+fedora       rawhide     ad57ef8d78d7    5 days ago   359.3 MB
+fedora       20          105182bb5e8b    5 days ago   372.7 MB
+fedora       heisenbug   105182bb5e8b    5 days ago   372.7 MB
+fedora       latest      105182bb5e8b    5 days ago   372.7 MB
+

Cancel a pull

Killing the docker pull process, for example by pressing CTRL-c while it is running in a terminal, will terminate the pull operation.

$ docker pull fedora
+
+Using default tag: latest
+latest: Pulling from library/fedora
+a3ed95caeb02: Pulling fs layer
+236608c7b546: Pulling fs layer
+^C
+

Note

The Engine terminates a pull operation when the connection between the Docker Engine daemon and the Docker Engine client initiating the pull is lost. If the connection with the Engine daemon is lost for reasons other than a manual interruption, the pull is also aborted.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/pull/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fpush%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fpush%2Findex.html new file mode 100644 index 00000000..bcac30ba --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fpush%2Findex.html @@ -0,0 +1,39 @@ +

docker push


Push an image or a repository to a registry

Usage

$ docker push [OPTIONS] NAME[:TAG]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Use docker image push to share your images with the Docker Hub registry or a self-hosted one.

Refer to the docker image tag reference for more information about valid image and tag names.

Killing the docker image push process, for example by pressing CTRL-c while it is running in a terminal, terminates the push operation.

Progress bars are shown during docker push, which show the uncompressed size. The actual amount of data that’s pushed will be compressed before sending, so the uploaded size will not be reflected by the progress bar.

Registry credentials are managed by docker login.

Concurrent uploads

By default the Docker daemon will push five layers of an image at a time. If you are on a low bandwidth connection this may cause timeout issues and you may want to lower this via the --max-concurrent-uploads daemon option. See the daemon documentation for more details.
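
As with pulls, this can be set in /etc/docker/daemon.json; a sketch that limits pushes to one layer at a time (restart the daemon afterwards):

{
  "max-concurrent-uploads": 1
}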

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--all-tags , -a + Push all tagged images in the repository
--disable-content-trust true Skip image signing
+--quiet , -q + Suppress verbose output

Examples

Push a new image to a registry

First save the new image by finding the container ID (using docker container ls) and then committing it to a new image name. Note that only lowercase letters, digits, and the characters -, _, and . are allowed when naming images:

$ docker container commit c16378f943fe rhel-httpd:latest
+

Now, push the image to the registry. In this example the registry is on a host named registry-host and listening on port 5000. To do this, tag the image with the host name or IP address, and the port of the registry:

$ docker image tag rhel-httpd:latest registry-host:5000/myadmin/rhel-httpd:latest
+
+$ docker image push registry-host:5000/myadmin/rhel-httpd:latest
+

Check that this worked by running:

$ docker image ls
+

You should see both rhel-httpd and registry-host:5000/myadmin/rhel-httpd listed.

Push all tags of an image

Use the -a (or --all-tags) option to push all tags of a local image.

The following example creates multiple tags for an image, and pushes all those tags to the registry.

$ docker image tag myimage registry-host:5000/myname/myimage:latest
+$ docker image tag myimage registry-host:5000/myname/myimage:v1.0.1
+$ docker image tag myimage registry-host:5000/myname/myimage:v1.0
+$ docker image tag myimage registry-host:5000/myname/myimage:v1
+

The image is now tagged under multiple names:

$ docker image ls
+
+REPOSITORY                          TAG        IMAGE ID       CREATED      SIZE
+myimage                             latest     6d5fcfe5ff17   2 hours ago  1.22MB
+registry-host:5000/myname/myimage   latest     6d5fcfe5ff17   2 hours ago  1.22MB
+registry-host:5000/myname/myimage   v1         6d5fcfe5ff17   2 hours ago  1.22MB
+registry-host:5000/myname/myimage   v1.0       6d5fcfe5ff17   2 hours ago  1.22MB
+registry-host:5000/myname/myimage   v1.0.1     6d5fcfe5ff17   2 hours ago  1.22MB
+

When pushing with the --all-tags option, all tags of the registry-host:5000/myname/myimage image are pushed:

$ docker image push --all-tags registry-host:5000/myname/myimage
+
+The push refers to repository [registry-host:5000/myname/myimage]
+195be5f8be1d: Pushed
+latest: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527
+195be5f8be1d: Layer already exists
+v1: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527
+195be5f8be1d: Layer already exists
+v1.0: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527
+195be5f8be1d: Layer already exists
+v1.0.1: digest: sha256:edafc0a0fb057813850d1ba44014914ca02d671ae247107ca70c94db686e7de6 size: 4527
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/push/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Frename%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Frename%2Findex.html new file mode 100644 index 00000000..a8837f70 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Frename%2Findex.html @@ -0,0 +1,8 @@ +

docker rename


Rename a container

Usage

$ docker rename CONTAINER NEW_NAME
+

Description

The docker rename command renames a container.

For example uses of this command, refer to the examples section below.

Examples

$ docker rename my_container my_new_container
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/rename/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Frestart%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Frestart%2Findex.html new file mode 100644 index 00000000..ac6eb2af --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Frestart%2Findex.html @@ -0,0 +1,10 @@ +

docker restart


Restart one or more containers

Usage

$ docker restart [OPTIONS] CONTAINER [CONTAINER...]
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--time , -t + 10 Seconds to wait for stop before killing the container

Examples

$ docker restart my_container
+
+
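
To give the container more time to stop gracefully before it is killed, you could pass --time; a sketch that waits 30 seconds:

$ docker restart --time 30 my_container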

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/restart/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Frm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Frm%2Findex.html new file mode 100644 index 00000000..f75661b0 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Frm%2Findex.html @@ -0,0 +1,30 @@ +

docker rm


Remove one or more containers

Usage

$ docker rm [OPTIONS] CONTAINER [CONTAINER...]
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--force , -f + Force the removal of a running container (uses SIGKILL)
+--link , -l + Remove the specified link
+--volumes , -v + Remove anonymous volumes associated with the container

Examples

Remove a container

This removes the container referenced under the link /redis.

$ docker rm /redis
+
+/redis
+

This removes the underlying link between /webapp and the /redis containers on the default bridge network, removing all network communication between the two containers. This does not apply when --link is used with user-specified networks.

$ docker rm --link /webapp/redis
+
+/webapp/redis
+

Force-remove a running container

This command force-removes a running container.

$ docker rm --force redis
+
+redis
+

The main process inside the container referenced under the link redis will receive SIGKILL, then the container will be removed.

Remove all stopped containers

Use the docker container prune command to remove all stopped containers, or refer to the docker system prune command to remove unused containers in addition to other Docker resources, such as (unused) images and networks.
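
A sketch of the docker container prune approach (it prompts for confirmation unless you pass --force):

$ docker container prune

WARNING! This will remove all stopped containers.
Are you sure you want to continue? [y/N] y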

Alternatively, you can use docker ps with the -q / --quiet option to generate a list of container IDs to remove, and use that list as an argument for the docker rm command.

Combining commands can be more flexible, but is less portable as it depends on features provided by the shell, and the exact syntax may differ depending on what shell is used. To use this approach on Windows, consider using PowerShell or Bash.

The example below uses docker ps -q to print the IDs of all containers that have exited (--filter status=exited), and removes those containers with the docker rm command:

$ docker rm $(docker ps --filter status=exited -q)
+

Or, using the xargs Linux utility;

$ docker ps --filter status=exited -q | xargs docker rm
+

Remove a container and its volumes

$ docker rm -v redis
+redis
+

This command removes the container and any volumes associated with it. Note that if a volume was specified with a name, it will not be removed.

Remove a container and selectively remove volumes

$ docker create -v awesome:/foo -v /bar --name hello redis
+hello
+
+$ docker rm -v hello
+

In this example, the volume for /foo remains intact, but the volume for /bar is removed. The same behavior holds for volumes inherited with --volumes-from.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Frmi%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Frmi%2Findex.html new file mode 100644 index 00000000..042c8e7d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Frmi%2Findex.html @@ -0,0 +1,61 @@ +

docker rmi


Remove one or more images

Usage

$ docker rmi [OPTIONS] IMAGE [IMAGE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Removes (and un-tags) one or more images from the host node. If an image has multiple tags, using this command with the tag as a parameter only removes the tag. If the tag is the only one for the image, both the image and the tag are removed.

This does not remove images from a registry. You cannot remove an image of a running container unless you use the -f option. To see all images on a host use the docker image ls command.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--force , -f + Force removal of the image
--no-prune Do not delete untagged parents

Examples

You can remove an image using its short or long ID, its tag, or its digest. If an image has one or more tags referencing it, you must remove all of them before the image is removed. Digest references are removed automatically when an image is removed by tag.

$ docker images
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+test1                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+test                      latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+test2                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+
+$ docker rmi fd484f19954f
+
+Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force
+2013/12/11 05:47:16 Error: failed to remove one or more images
+
+$ docker rmi test1:latest
+
+Untagged: test1:latest
+
+$ docker rmi test2:latest
+
+Untagged: test2:latest
+
+
+$ docker images
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+test                      latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+
+$ docker rmi test:latest
+
+Untagged: test:latest
+Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+

If you use the -f flag and specify the image’s short or long ID, then this command untags and removes all images that match the specified ID.

$ docker images
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+test1                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+test                      latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+test2                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+
+$ docker rmi -f fd484f19954f
+
+Untagged: test1:latest
+Untagged: test:latest
+Untagged: test2:latest
+Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+

An image pulled by digest has no tag associated with it:

$ docker images --digests
+
+REPOSITORY                     TAG       DIGEST                                                                    IMAGE ID        CREATED         SIZE
+localhost:5000/test/busybox    <none>    sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536    9 weeks ago     2.43 MB
+

To remove an image using its digest:

$ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125
+Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2
+Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/rmi/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Frun%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Frun%2Findex.html new file mode 100644 index 00000000..908fe24a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Frun%2Findex.html @@ -0,0 +1,196 @@ +

docker run


Run a command in a new container

Usage

$ docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker run command first creates a writeable container layer over the specified image, and then starts it using the specified command. That is, docker run is equivalent to the API /containers/create then /containers/(id)/start. A stopped container can be restarted with all its previous changes intact using docker start. See docker ps -a to view a list of all containers.

The docker run command can be used in combination with docker commit to change the command that a container runs. There is additional detailed information about docker run in the Docker run reference.

For information on connecting a container to a network, see the Docker network overview.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--add-host Add a custom host-to-IP mapping (host:ip)
+--attach , -a + Attach to STDIN, STDOUT or STDERR
--blkio-weight Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)
--blkio-weight-device Block IO weight (relative device weight)
--cap-add Add Linux capabilities
--cap-drop Drop Linux capabilities
--cgroup-parent Optional parent cgroup for the container
--cgroupns +API 1.41+
Cgroup namespace to use (host|private) 'host': Run the container in the Docker host's cgroup namespace 'private': Run the container in its own private cgroup namespace '': Use the cgroup namespace as configured by the default-cgroupns-mode option on the daemon (default)
--cidfile Write the container ID to the file
--cpu-count CPU count (Windows only)
--cpu-percent CPU percent (Windows only)
--cpu-period Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period Limit CPU real-time period in microseconds
--cpu-rt-runtime Limit CPU real-time runtime in microseconds
+--cpu-shares , -c + CPU shares (relative weight)
--cpus Number of CPUs
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
+--detach , -d + Run container in background and print container ID
--detach-keys Override the key sequence for detaching a container
--device Add a host device to the container
--device-cgroup-rule Add a rule to the cgroup allowed devices list
--device-read-bps Limit read rate (bytes per second) from a device
--device-read-iops Limit read rate (IO per second) from a device
--device-write-bps Limit write rate (bytes per second) to a device
--device-write-iops Limit write rate (IO per second) to a device
--disable-content-trust true Skip image verification
--dns Set custom DNS servers
--dns-opt Set DNS options
--dns-option Set DNS options
--dns-search Set custom DNS search domains
--domainname Container NIS domain name
--entrypoint Overwrite the default ENTRYPOINT of the image
+--env , -e + Set environment variables
--env-file Read in a file of environment variables
--expose Expose a port or a range of ports
--gpus +API 1.40+
GPU devices to add to the container ('all' to pass all GPUs)
--group-add Add additional groups to join
--health-cmd Command to run to check health
--health-interval Time between running the check (ms|s|m|h) (default 0s)
--health-retries Consecutive failures needed to report unhealthy
--health-start-period Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)
--health-timeout Maximum time to allow one check to run (ms|s|m|h) (default 0s)
--help Print usage
+--hostname , -h + Container host name
--init Run an init inside the container that forwards signals and reaps processes
+--interactive , -i + Keep STDIN open even if not attached
--io-maxbandwidth Maximum IO bandwidth limit for the system drive (Windows only)
--io-maxiops Maximum IOps limit for the system drive (Windows only)
--ip IPv4 address (e.g., 172.30.100.104)
--ip6 IPv6 address (e.g., 2001:db8::33)
--ipc IPC mode to use
--isolation Container isolation technology
--kernel-memory Kernel memory limit
+--label , -l + Set meta data on a container
--label-file Read in a line delimited file of labels
--link Add link to another container
--link-local-ip Container IPv4/IPv6 link-local addresses
--log-driver Logging driver for the container
--log-opt Log driver options
--mac-address Container MAC address (e.g., 92:d0:c6:0a:29:33)
+--memory , -m + Memory limit
--memory-reservation Memory soft limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness -1 Tune container memory swappiness (0 to 100)
--mount Attach a filesystem mount to the container
--name Assign a name to the container
--net Connect a container to a network
--net-alias Add network-scoped alias for the container
--network Connect a container to a network
--network-alias Add network-scoped alias for the container
--no-healthcheck Disable any container-specified HEALTHCHECK
--oom-kill-disable Disable OOM Killer
--oom-score-adj Tune host's OOM preferences (-1000 to 1000)
--pid PID namespace to use
--pids-limit Tune container pids limit (set -1 for unlimited)
--platform Set platform if server is multi-platform capable
--privileged Give extended privileges to this container
+--publish , -p + Publish a container's port(s) to the host
+--publish-all , -P + Publish all exposed ports to random ports
--pull missing Pull image before running ("always"|"missing"|"never")
--read-only Mount the container's root filesystem as read only
--restart no Restart policy to apply when a container exits
--rm Automatically remove the container when it exits
--runtime Runtime to use for this container
--security-opt Security Options
--shm-size Size of /dev/shm
--sig-proxy true Proxy received signals to the process
--stop-signal SIGTERM Signal to stop a container
--stop-timeout Timeout (in seconds) to stop a container
--storage-opt Storage driver options for the container
--sysctl Sysctl options
--tmpfs Mount a tmpfs directory
+--tty , -t + Allocate a pseudo-TTY
--ulimit Ulimit options
+--user , -u + Username or UID (format: <name|uid>[:<group|gid>])
--userns User namespace to use
--uts UTS namespace to use
+--volume , -v + Bind mount a volume
--volume-driver Optional volume driver for the container
--volumes-from Mount volumes from the specified container(s)
+--workdir , -w + Working directory inside the container

Examples

Assign name and allocate pseudo-TTY (--name, -it)

$ docker run --name test -it debian
+
+root@d6c0fe130dba:/# exit 13
+$ echo $?
+13
+$ docker ps -a | grep test
+d6c0fe130dba        debian:7            "/bin/bash"         26 seconds ago      Exited (13) 17 seconds ago                         test
+

This example runs a container named test using the debian:latest image. The -it options instruct Docker to allocate a pseudo-TTY connected to the container’s stdin, creating an interactive bash shell in the container. In the example, the bash shell is quit by entering exit 13. This exit code is passed on to the caller of docker run, and is recorded in the test container’s metadata.

Capture container ID (--cidfile)

$ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+

This will create a container and print test to the console. The cidfile flag makes Docker attempt to create a new file and write the container ID to it. If the file exists already, Docker will return an error. Docker will close this file when docker run exits.
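As a brief follow-up sketch (not part of the original page), the file can then be used to reference the container by its ID, for example to remove it:

$ docker rm $(cat /tmp/docker_test.cid)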

Full container capabilities (--privileged)

$ docker run -t -i --rm ubuntu bash
+root@bc338942ef20:/# mount -t tmpfs none /mnt
+mount: permission denied
+

This does not work, because by default most potentially dangerous kernel capabilities are dropped, including cap_sys_admin (which is required to mount filesystems). However, the --privileged flag allows it to run:

$ docker run -t -i --privileged ubuntu bash
+root@50e3f57e16e6:/# mount -t tmpfs none /mnt
+root@50e3f57e16e6:/# df -h
+Filesystem      Size  Used Avail Use% Mounted on
+none            1.9G     0  1.9G   0% /mnt
+

The --privileged flag gives all capabilities to the container, and it also lifts all the limitations enforced by the device cgroup controller. In other words, the container can then do almost everything that the host can do. This flag exists to allow special use-cases, like running Docker within Docker.

Set working directory (-w)

$ docker  run -w /path/to/dir/ -i -t  ubuntu pwd
+

The -w option runs the command inside the specified working directory, here /path/to/dir/. If the path does not exist, it is created inside the container.

Set storage driver options per container

$ docker run -it --storage-opt size=120G fedora /bin/bash
+

The size option sets the container rootfs size to 120G at creation time. This option is only available for the devicemapper, btrfs, overlay2, windowsfilter and zfs graph drivers. For the devicemapper, btrfs, windowsfilter and zfs graph drivers, you cannot pass a size less than the Default BaseFS Size. For the overlay2 storage driver, the size option is only available if the backing filesystem is xfs and mounted with the pquota mount option. Under these conditions, you can pass any size less than the backing filesystem size.

Mount tmpfs (--tmpfs)

$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image
+

The --tmpfs flag mounts an empty tmpfs into the container with the rw, noexec, nosuid, size=65536k options.

Mount volume (-v, --read-only)

$ docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
+

The -v flag mounts the current working directory into the container. The -w option then runs the command inside that directory, by changing into the path returned by pwd. So this combination executes the command in the container, but against the host’s current working directory, which is bind-mounted at the same path.

$ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
+

When the host directory of a bind-mounted volume doesn’t exist, Docker will automatically create this directory on the host for you. In the example above, Docker will create the /doesnt/exist folder before starting your container.

$ docker run --read-only -v /icanwrite busybox touch /icanwrite/here
+

Volumes can be used in combination with --read-only to control where a container writes files. The --read-only flag mounts the container’s root filesystem as read only prohibiting writes to locations other than the specified volumes for the container.

$ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh
+

By bind-mounting the Docker Unix socket and a statically linked docker binary (refer to get the Linux binary), you give the container full access to create and manipulate the host’s Docker daemon.

On Windows, the paths must be specified using Windows-style semantics.

PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt
+Contents of file
+
+PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt
+Contents of file
+

The following examples will fail when using Windows-based containers, as the destination of a volume or bind mount inside the container must be one of: a non-existing or empty directory; or a drive other than C:. Further, the source of a bind mount must be a local directory, not a file.

net use z: \\remotemachine\share
+docker run -v z:\foo:c:\dest ...
+docker run -v \\uncpath\to\directory:c:\dest ...
+docker run -v c:\foo\somefile.txt:c:\dest ...
+docker run -v c:\foo:c: ...
+docker run -v c:\foo:c:\existing-directory-with-contents ...
+

For in-depth information about volumes, refer to manage data in containers

Add bind mounts or volumes using the --mount flag

The --mount flag allows you to mount volumes, host-directories and tmpfs mounts in a container.

The --mount flag supports most options that are supported by the -v or the --volume flag, but uses a different syntax. For in-depth information on the --mount flag, and a comparison between --volume and --mount, refer to the service create command reference.

Even though there is no plan to deprecate --volume, usage of --mount is recommended.

Examples:

$ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here
+
$ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh
+

Publish or expose port (-p, --expose)

$ docker run -p 127.0.0.1:80:8080/tcp ubuntu bash
+

This binds port 8080 of the container to TCP port 80 on 127.0.0.1 of the host machine. You can also specify udp and sctp ports. The Docker User Guide explains in detail how to manipulate ports in Docker.
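As an illustrative variation (a sketch, not from the original page), the same syntax accepts a protocol suffix, for example to publish a UDP port:

$ docker run -p 127.0.0.1:53:5353/udp ubuntu bash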

Note that ports which are not bound to a specific host address (i.e., -p 80:80 instead of -p 127.0.0.1:80:80) are accessible from the outside. This also applies if you configured UFW to block this specific port, as Docker manages its own iptables rules.

$ docker run --expose 80 ubuntu bash
+

This exposes port 80 of the container without publishing the port to the host system’s interfaces.

Set the pull policy (--pull)

Use the --pull flag to set the image pull policy when creating (and running) the container.

The --pull flag can take one of these values:

Value Description
+missing (default) Pull the image if it was not found in the image cache, or use the cached image otherwise.
never Do not pull the image, even if it’s missing, and produce an error if the image does not exist in the image cache.
always Always perform a pull before creating the container.

When creating (and running) a container from an image, the daemon checks if the image exists in the local image cache. If the image is missing, an error is returned to the CLI, allowing it to initiate a pull.

The default (missing) is to only pull the image if it is not present in the daemon’s image cache. This default allows you to run images that only exist locally (for example, images you built from a Dockerfile, but that have not been pushed to a registry), and reduces networking.

The always option always initiates a pull before creating the container. This option makes sure the image is up-to-date, and prevents you from using outdated images, but may not be suitable in situations where you want to test a locally built image before pushing (as pulling the image overwrites the existing image in the image cache).
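As a sketch of this behavior (the hello-world image is used here purely for illustration), forcing a fresh pull looks like:

$ docker run --pull=always --rm hello-world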

The never option disables (implicit) pulling images when creating containers, and only uses images that are available in the image cache. If the specified image is not found, an error is produced, and the container is not created. This option is useful in situations where networking is not available, or to prevent images from being pulled implicitly when creating containers.

The following example shows docker run with the --pull=never option set, which produces an error because the image is missing from the image cache:

$ docker run --pull=never hello-world
+docker: Error response from daemon: No such image: hello-world:latest.
+

Set environment variables (-e, --env, --env-file)

$ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+

Use the -e, --env, and --env-file flags to set simple (non-array) environment variables in the container you’re running, or overwrite variables that are defined in the Dockerfile of the image you’re running.

You can define the variable and its value when running the container:

$ docker run --env VAR1=value1 --env VAR2=value2 ubuntu env | grep VAR
+VAR1=value1
+VAR2=value2
+

You can also use variables that you’ve exported to your local environment:

export VAR1=value1
+export VAR2=value2
+
+$ docker run --env VAR1 --env VAR2 ubuntu env | grep VAR
+VAR1=value1
+VAR2=value2
+

When running the command, the Docker CLI client checks the value the variable has in your local environment and passes it to the container. If no = is provided and that variable is not exported in your local environment, the variable won’t be set in the container.

You can also load the environment variables from a file. This file should use the syntax <variable>=value (which sets the variable to the given value) or <variable> (which takes the value from the local environment), and # for comments.

$ cat env.list
+# This is a comment
+VAR1=value1
+VAR2=value2
+USER
+
+$ docker run --env-file env.list ubuntu env | grep -E 'VAR|USER'
+VAR1=value1
+VAR2=value2
+USER=jonzeolla
+

Set metadata on container (-l, --label, --label-file)

A label is a key=value pair that applies metadata to a container. To label a container with two labels:

$ docker run -l my-label --label com.example.foo=bar ubuntu bash
+

The my-label key doesn’t specify a value so the label defaults to an empty string (""). To add multiple labels, repeat the label flag (-l or --label).

The key=value must be unique to avoid overwriting the label value. If you specify labels with identical keys but different values, each subsequent value overwrites the previous. Docker uses the last key=value you supply.
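A minimal sketch of this last-one-wins behavior (the container name, label key, and inspect step are illustrative, not from the original page):

$ docker run -d --label com.example.foo=first --label com.example.foo=second --name label-demo busybox top
$ docker inspect --format '{{ index .Config.Labels "com.example.foo" }}' label-demo
second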

Use the --label-file flag to load multiple labels from a file. Delimit each label in the file with an EOL mark. The example below loads labels from a labels file in the current directory:

$ docker run --label-file ./labels ubuntu bash
+

The label-file format is similar to the format for loading environment variables. (Unlike environment variables, labels are not visible to processes running inside a container.) The following example illustrates a label-file format:

com.example.label1="a label"
+
+# this is a comment
+com.example.label2=another\ label
+com.example.label3
+

You can load multiple label-files by supplying multiple --label-file flags.

For additional information on working with labels, see Labels - custom metadata in Docker in the Docker User Guide.

Connect a container to a network (--network)

When you start a container use the --network flag to connect it to a network. This adds the busybox container to the my-net network.

$ docker run -itd --network=my-net busybox
+

You can also choose the IP addresses for the container with --ip and --ip6 flags when you start the container on a user-defined network.

$ docker run -itd --network=my-net --ip=10.10.9.75 busybox
+

If you want to add a running container to a network use the docker network connect subcommand.

You can connect multiple containers to the same network. Once connected, the containers can communicate easily using only another container’s IP address or name. For overlay networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way.

Note

Service discovery is unavailable on the default bridge network. Containers can communicate via their IP addresses by default. To communicate by name, they must be linked.

You can disconnect a container from a network using the docker network disconnect command.

Mount volumes from container (--volumes-from)

$ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
+

The --volumes-from flag mounts all the defined volumes from the referenced containers. Containers can be specified by repetitions of the --volumes-from argument. The container ID may be optionally suffixed with :ro or :rw to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted in the same mode (read write or read only) as the reference container.

Labeling systems like SELinux require that proper labels are placed on volume content mounted into a container. Without a label, the security system might prevent the processes running inside the container from using the content. By default, Docker does not change the labels set by the OS.

To change the label in the container context, you can add either of two suffixes :z or :Z to the volume mount. These suffixes tell Docker to relabel file objects on the shared volumes. The z option tells Docker that two containers share the volume content. As a result, Docker labels the content with a shared content label. Shared volume labels allow all containers to read/write content. The Z option tells Docker to label the content with a private unshared label. Only the current container can use a private volume.
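For example (a sketch assuming an SELinux-enabled host and a host directory /var/db), a shared relabel would look like:

$ docker run -v /var/db:/var/db:z -i -t fedora bash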

Attach to STDIN/STDOUT/STDERR (-a)

The -a flag tells docker run to bind to the container’s STDIN, STDOUT or STDERR. This makes it possible to manipulate the output and input as needed.

$ echo "test" | docker run -i -a stdin ubuntu cat -
+

This pipes data into a container and prints the container’s ID by attaching only to the container’s STDIN.

$ docker run -a stderr ubuntu echo test
+

This isn’t going to print anything unless there’s an error because we’ve only attached to the STDERR of the container. The container’s logs still store what’s been written to STDERR and STDOUT.

$ cat somefile | docker run -i -a stdin mybuilder dobuild
+

This is how piping a file into a container could be done for a build. The container’s ID will be printed after the build is done and the build logs could be retrieved using docker logs. This is useful if you need to pipe a file or something else into a container and retrieve the container’s ID once the container has finished running.

Add host device to container (--device)

$ docker run --device=/dev/sdc:/dev/xvdc \
+             --device=/dev/sdd --device=/dev/zero:/dev/nulo \
+             -i -t \
+             ubuntu ls -l /dev/{xvdc,sdd,nulo}
+
+brw-rw---- 1 root disk 8, 2 Feb  9 16:05 /dev/xvdc
+brw-rw---- 1 root disk 8, 3 Feb  9 16:05 /dev/sdd
+crw-rw-rw- 1 root root 1, 5 Feb  9 16:05 /dev/nulo
+

It is often necessary to directly expose devices to a container. The --device option enables that. For example, a specific block storage device or loop device or audio device can be added to an otherwise unprivileged container (without the --privileged flag) and have the application directly access it.

By default, the container will be able to read, write and mknod these devices. This can be overridden using a third :rwm set of options to each --device flag. If the container is running in privileged mode, then the permissions specified will be ignored.

$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
+
+Command (m for help): q
+$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk  /dev/xvdc
+You will not be able to write the partition table.
+
+Command (m for help): q
+
+$ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk  /dev/xvdc
+
+Command (m for help): q
+
+$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk  /dev/xvdc
+fdisk: unable to open /dev/xvdc: Operation not permitted
+

Note

The --device option cannot be safely used with ephemeral devices. Block devices that may be removed should not be added to untrusted containers with --device.

For Windows, the format of the string passed to the --device option is in the form of --device=<IdType>/<Id>. Beginning with Windows Server 2019 and Windows 10 October 2018 Update, Windows only supports an IdType of class and the Id as a device interface class GUID. Refer to the table defined in the Windows container docs for a list of container-supported device interface class GUIDs.

If this option is specified for a process-isolated Windows container, all devices that implement the requested device interface class GUID are made available in the container. For example, the command below makes all COM ports on the host visible in the container.

PS C:\> docker run --device=class/86E0D1E0-8089-11D0-9CE4-08003E301F73 mcr.microsoft.com/windows/servercore:ltsc2019
+

Note

The --device option is only supported on process-isolated Windows containers. This option fails if the container isolation is hyperv or when running Linux Containers on Windows (LCOW).

+ Using dynamically created devices (--device-cgroup-rule)

Devices available to a container are assigned at creation time. The assigned devices are both added to the cgroup.allow file and created in the container once it is run. This poses a problem when a new device needs to be added to a running container.

One solution is to add a more permissive rule to a container, allowing it access to a wider range of devices. For example, supposing our container needs access to a character device with major number 42 and any minor number (added as new devices appear), the following rule would be added:

$ docker run -d --device-cgroup-rule='c 42:* rmw' --name my-container my-image
+

Then, a user could ask udev to execute a script that runs docker exec my-container mknod newDevX c 42 <minor> to create the required device when it is added.
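A minimal sketch of such a helper script (the script, device name, and udev wiring are hypothetical, not from the original page):

#!/bin/sh
# Hypothetical helper invoked by a udev rule with the new device's minor number as its argument.
# The cgroup rule 'c 42:* rmw' above already permits mknod for this major number inside the container.
MINOR="$1"
docker exec my-container mknod "/dev/newDev${MINOR}" c 42 "${MINOR}"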

Note: initially present devices still need to be explicitly added to the docker run / docker create command.

Access an NVIDIA GPU

The --gpus flag allows you to access NVIDIA GPU resources. First you need to install nvidia-container-runtime. Visit Specify a container’s resources for more information.

To use --gpus, specify which GPUs (or all) to use. If no value is provided, all available GPUs are used. The example below exposes all available GPUs.

$ docker run -it --rm --gpus all ubuntu nvidia-smi
+

Use the device option to specify GPUs. The example below exposes a specific GPU.

$ docker run -it --rm --gpus device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a ubuntu nvidia-smi
+

The example below exposes the first and third GPUs.

$ docker run -it --rm --gpus '"device=0,2"' ubuntu nvidia-smi
+

Restart policies (--restart)

Use Docker’s --restart to specify a container’s restart policy. A restart policy controls whether the Docker daemon restarts a container after exit. Docker supports the following restart policies:

Policy Result
no Do not automatically restart the container when it exits. This is the default.
on-failure[:max-retries] Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts.
unless-stopped Restart the container unless it is explicitly stopped or Docker itself is stopped or restarted.
always Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container.
$ docker run --restart=always redis
+

This will run the redis container with a restart policy of always so that if the container exits, Docker will restart it.
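A variation (sketch) that restarts only on failure and caps the number of retries at 10:

$ docker run --restart=on-failure:10 redis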

More detailed information on restart policies can be found in the Restart Policies (--restart) section of the Docker run reference page.

Add entries to container hosts file (--add-host)

You can add other hosts into a container’s /etc/hosts file by using one or more --add-host flags. This example adds a static address for a host named docker:

$ docker run --add-host=docker:93.184.216.34 --rm -it alpine
+
+/ # ping docker
+PING docker (93.184.216.34): 56 data bytes
+64 bytes from 93.184.216.34: seq=0 ttl=37 time=93.052 ms
+64 bytes from 93.184.216.34: seq=1 ttl=37 time=92.467 ms
+64 bytes from 93.184.216.34: seq=2 ttl=37 time=92.252 ms
+^C
+--- docker ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 92.209/92.495/93.052 ms
+

Sometimes you need to connect to the Docker host from within your container. To enable this, pass the Docker host’s IP address to the container using the --add-host flag. To find the host’s address, use the ip addr show command.

The flags you pass to ip addr show depend on whether you are using IPv4 or IPv6 networking in your containers. Use the following flags for IPv4 address retrieval for a network device named eth0:

$ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print $2}' | cut -d / -f 1 | sed -n 1p`
+$ docker run  --add-host=docker:${HOSTIP} --rm -it debian
+

For IPv6 use the -6 flag instead of the -4 flag. For other network devices, replace eth0 with the correct device name (for example docker0 for the bridge device).

Set ulimits in container (--ulimit)

Since changing ulimit settings in a container requires extra privileges not available in the default container, you can set them using the --ulimit flag. --ulimit is specified with a soft and hard limit in the form <type>=<soft limit>[:<hard limit>], for example:

$ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n"
+1024
+

Note

If you do not provide a hard limit, the soft limit is used for both values. If no ulimits are set, they are inherited from the default ulimits set on the daemon. The as option is disabled; in other words, the following command is not supported:

$ docker run -it --ulimit as=1024 fedora /bin/bash
+

The values are sent to the appropriate syscall as they are set. Docker doesn’t perform any byte conversion. Take this into account when setting the values.

For nproc usage

Be careful setting nproc with the ulimit flag, as nproc is designed by Linux to set the maximum number of processes available to a user, not to a container. For example, start four containers as the daemon user:

$ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+
+$ docker run -d -u daemon --ulimit nproc=3 busybox top
+

The fourth container fails and reports a “[8] System error: resource temporarily unavailable” error. It fails because the caller set nproc=3, so the first three containers use up the three-process quota set for the daemon user.

Stop container with signal (--stop-signal)

The --stop-signal flag sets the system call signal that will be sent to the container to exit. This signal can be a signal name in the format SIG<NAME>, for instance SIGKILL, or an unsigned number that matches a position in the kernel’s syscall table, for instance 9.

The default is SIGTERM if not specified.
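For example (a sketch; the container name is illustrative), the following makes docker stop deliver SIGQUIT instead of SIGTERM:

$ docker run -d --stop-signal SIGQUIT --name sig-demo busybox top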

Optional security options (--security-opt)

On Windows, this flag can be used to specify the credentialspec option. The credentialspec must be in the format file://spec.txt or registry://keyname.
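A sketch of that syntax (the spec file name and image are illustrative):

PS C:\> docker run --security-opt "credentialspec=file://spec.txt" mcr.microsoft.com/windows/servercore:ltsc2019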

Stop container with timeout (--stop-timeout)

The --stop-timeout flag sets the number of seconds to wait for the container to stop after sending the pre-defined (see --stop-signal) system call signal. If the container does not exit after the timeout elapses, it is forcibly killed with a SIGKILL signal.

If --stop-timeout is set to -1, no timeout is applied, and the daemon will wait indefinitely for the container to exit.

The default is determined by the daemon, and is 10 seconds for Linux containers, and 30 seconds for Windows containers.
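As an illustration (sketch), giving a container up to 60 seconds to shut down cleanly before it is killed:

$ docker run -d --stop-timeout 60 busybox top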

Specify isolation technology for container (--isolation)

This option is useful in situations where you are running Docker containers on Windows. The --isolation=<value> option sets a container’s isolation technology. On Linux, the only supported value is default, which uses Linux namespaces. These two commands are equivalent on Linux:

$ docker run -d busybox top
+$ docker run -d --isolation default busybox top
+

On Windows, --isolation can take one of these values:

Value Description
default Use the value specified by the Docker daemon’s --exec-opt or system default (see below).
process Shared-kernel namespace isolation.
hyperv Hyper-V hypervisor partition-based isolation.

The default isolation on Windows server operating systems is process, and hyperv on Windows client operating systems, such as Windows 10. Process isolation is more performant, but requires the image to match the kernel version of the host.

On Windows server, assuming the default configuration, these commands are equivalent and result in process isolation:

PS C:\> docker run -d microsoft/nanoserver powershell echo process
+PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process
+PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process
+

If you have set the --exec-opt isolation=hyperv option on the Docker daemon, or are running against a Windows client-based daemon, these commands are equivalent and result in hyperv isolation:

PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv
+PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv
+PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv
+

Specify hard limits on memory available to containers (-m, --memory)

These parameters always set an upper limit on the memory available to the container. On Linux, this is set on the cgroup and applications in a container can query it at /sys/fs/cgroup/memory/memory.limit_in_bytes.

On Windows, this will affect containers differently depending on what type of isolation is used.
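A minimal example (sketch) capping a container at 300 MB of memory:

$ docker run -it -m 300M ubuntu /bin/bash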

Configure namespaced kernel parameters (sysctls) at runtime

The --sysctl option sets namespaced kernel parameters (sysctls) in the container. For example, to turn on IP forwarding in the container’s network namespace, run this command:

$ docker run --sysctl net.ipv4.ip_forward=1 someimage
+

Note

Not all sysctls are namespaced. Docker does not support changing sysctls inside of a container that also modify the host system. As the kernel evolves we expect to see more sysctls become namespaced.

Currently supported sysctls

IPC Namespace: kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced, and sysctls beginning with fs.mqueue.*. If you use the --ipc=host option, these sysctls are not allowed.

Network Namespace: sysctls beginning with net.*. If you use the --network=host option, these sysctls are not allowed.

+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsave%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsave%2Findex.html new file mode 100644 index 00000000..51dbc313 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsave%2Findex.html @@ -0,0 +1,26 @@ +

docker save


Save one or more images to a tar archive (streamed to STDOUT by default)

Usage

$ docker save [OPTIONS] IMAGE [IMAGE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Produces a tarred repository to the standard output stream. The archive contains all parent layers, and all tags and versions (or the specified repo:tag) for each argument provided.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--output , -o + Write to a file, instead of STDOUT

Examples

Create a backup that can then be used with docker load.

$ docker save busybox > busybox.tar
+
+$ ls -sh busybox.tar
+
+2.7M busybox.tar
+
+$ docker save --output busybox.tar busybox
+
+$ ls -sh busybox.tar
+
+2.7M busybox.tar
+
+$ docker save -o fedora-all.tar fedora
+
+$ docker save -o fedora-latest.tar fedora:latest
+

Save an image to a tar.gz file using gzip

You can use gzip to save the image file and make the backup smaller.

$ docker save myimage:latest | gzip > myimage_latest.tar.gz
+

Cherry-pick particular tags

You can even cherry-pick particular tags of an image repository.

$ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
+
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsearch%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsearch%2Findex.html new file mode 100644 index 00000000..1272fa6f --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsearch%2Findex.html @@ -0,0 +1,79 @@ +

docker search


Search the Docker Hub for images

Usage

$ docker search [OPTIONS] TERM
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Search Docker Hub for images

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Filter output based on conditions provided
--format Pretty-print search using a Go template
--limit 25 Max number of search results
--no-trunc Don't truncate output

Examples

Search images by name

This example displays images with a name containing ‘busybox’:

$ docker search busybox
+
+NAME                             DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
+busybox                          Busybox base image.                             316       [OK]
+progrium/busybox                                                                 50                   [OK]
+radial/busyboxplus               Full-chain, Internet enabled, busybox made...   8                    [OK]
+odise/busybox-python                                                             2                    [OK]
+azukiapp/busybox                 This image is meant to be used as the base...   2                    [OK]
+ofayau/busybox-jvm               Prepare busybox to install a 32 bits JVM.       1                    [OK]
+shingonoide/archlinux-busybox    Arch Linux, a lightweight and flexible Lin...   1                    [OK]
+odise/busybox-curl                                                               1                    [OK]
+ofayau/busybox-libc32            Busybox with 32 bits (and 64 bits) libs         1                    [OK]
+peelsky/zulu-openjdk-busybox                                                     1                    [OK]
+skomma/busybox-data              Docker image suitable for data volume cont...   1                    [OK]
+elektritter/busybox-teamspeak    Lightweight teamspeak3 container based on...    1                    [OK]
+socketplane/busybox                                                              1                    [OK]
+oveits/docker-nginx-busybox      This is a tiny NginX docker image based on...   0                    [OK]
+ggtools/busybox-ubuntu           Busybox ubuntu version with extra goodies       0                    [OK]
+nikfoundas/busybox-confd         Minimal busybox based distribution of confd     0                    [OK]
+openshift/busybox-http-app                                                       0                    [OK]
+jllopis/busybox                                                                  0                    [OK]
+swyckoff/busybox                                                                 0                    [OK]
+powellquiring/busybox                                                            0                    [OK]
+williamyeh/busybox-sh            Docker image for BusyBox's sh                   0                    [OK]
+simplexsys/busybox-cli-powered   Docker busybox images, with a few often us...   0                    [OK]
+fhisamoto/busybox-java           Busybox java                                    0                    [OK]
+scottabernethy/busybox                                                           0                    [OK]
+marclop/busybox-solr
+

Display non-truncated description (--no-trunc)

This example displays images with a name containing ‘busybox’ and at least 3 stars, without truncating the description in the output:

$ docker search --filter=stars=3 --no-trunc busybox
+
+NAME                 DESCRIPTION                                                                               STARS     OFFICIAL   AUTOMATED
+busybox              Busybox base image.                                                                       325       [OK]
+progrium/busybox                                                                                               50                   [OK]
+radial/busyboxplus   Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors.   8                    [OK]
+

Limit search results (--limit)

The --limit flag sets the maximum number of results returned by a search. The value can be between 1 and 100. The default value of --limit is 25.
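For instance (a sketch, output omitted), to return at most five results:

$ docker search --limit 5 busybox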

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g. --filter is-automated=true --filter stars=3)

The currently supported filters are:

stars

This example displays images with a name containing ‘busybox’ and at least 3 stars:

$ docker search --filter stars=3 busybox
+
+NAME                 DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
+busybox              Busybox base image.                             325       [OK]
+progrium/busybox                                                     50                   [OK]
+radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                    [OK]
+

is-automated

This example displays images with a name containing ‘busybox’ that are automated builds:

$ docker search --filter is-automated=true busybox
+
+NAME                 DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
+progrium/busybox                                                     50                   [OK]
+radial/busyboxplus   Full-chain, Internet enabled, busybox made...   8                    [OK]
+

is-official

This example displays images with a name containing ‘busybox’ that have at least 3 stars and are official builds:

$ docker search --filter is-official=true --filter stars=3 busybox
+
+NAME      DESCRIPTION           STARS     OFFICIAL   AUTOMATED
+busybox   Busybox base image.   325       [OK]
+

Format the output

The formatting option (--format) pretty-prints search output using a Go template.

Valid placeholders for the Go template are:

Placeholder Description
.Name Image Name
.Description Image description
.StarCount Number of stars for the image
.IsOfficial “OK” if image is official
.IsAutomated “OK” if image build was automated

When you use the --format option, the search command will output the data exactly as the template declares. If you use the table directive, column headers are included as well.

The following example uses a template without headers and outputs the Name and StarCount entries separated by a colon (:) for all images:

$ docker search --format "{{.Name}}: {{.StarCount}}" nginx
+
+nginx: 5441
+jwilder/nginx-proxy: 953
+richarvey/nginx-php-fpm: 353
+million12/nginx-php: 75
+webdevops/php-nginx: 70
+h3nrik/nginx-ldap: 35
+bitnami/nginx: 23
+evild/alpine-nginx: 14
+million12/nginx: 9
+maxexcloo/nginx: 7
+

This example outputs a table format:

$ docker search --format "table {{.Name}}\t{{.IsAutomated}}\t{{.IsOfficial}}" nginx
+
+NAME                                     AUTOMATED           OFFICIAL
+nginx                                                        [OK]
+jwilder/nginx-proxy                      [OK]
+richarvey/nginx-php-fpm                  [OK]
+jrcs/letsencrypt-nginx-proxy-companion   [OK]
+million12/nginx-php                      [OK]
+webdevops/php-nginx                      [OK]
+
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret%2Findex.html new file mode 100644 index 00000000..4d71bd29 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret%2Findex.html @@ -0,0 +1,7 @@ +

docker secret


Manage Docker secrets

Swarm This command works with the Swarm orchestrator.

Usage

$ docker secret COMMAND
+

Description

Manage secrets.

Child commands

Command Description
docker secret create Create a secret from a file or STDIN as content
docker secret inspect Display detailed information on one or more secrets
docker secret ls List secrets
docker secret rm Remove one or more secrets
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_create%2Findex.html new file mode 100644 index 00000000..5c57bfc9 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_create%2Findex.html @@ -0,0 +1,52 @@ +

docker secret create


Create a secret from a file or STDIN as content

Swarm This command works with the Swarm orchestrator.

Usage

$ docker secret create [OPTIONS] SECRET [file|-]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a secret using standard input or from a file for the secret content.

For detailed information about using secrets, refer to manage sensitive data with Docker secrets.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--driver , -d + Secret driver
+--label , -l + Secret labels
--template-driver Template driver

Examples

Create a secret

$ printf "my super secret password" | docker secret create my_secret -
+
+onakdyv307se2tl7nl20anokv
+
+$ docker secret ls
+
+ID                          NAME                CREATED             UPDATED
+onakdyv307se2tl7nl20anokv   my_secret           6 seconds ago       6 seconds ago
+

Create a secret with a file

$ docker secret create my_secret ./secret.json
+
+dg426haahpi5ezmkkj5kyl3sn
+
+$ docker secret ls
+
+ID                          NAME                CREATED             UPDATED
+dg426haahpi5ezmkkj5kyl3sn   my_secret           7 seconds ago       7 seconds ago
+

Create a secret with labels

$ docker secret create \
+  --label env=dev \
+  --label rev=20170324 \
+  my_secret ./secret.json
+
+eo7jnzguqgtpdah3cm5srfb97
+
$ docker secret inspect my_secret
+
+[
+    {
+        "ID": "eo7jnzguqgtpdah3cm5srfb97",
+        "Version": {
+            "Index": 17
+        },
+        "CreatedAt": "2017-03-24T08:15:09.735271783Z",
+        "UpdatedAt": "2017-03-24T08:15:09.735271783Z",
+        "Spec": {
+            "Name": "my_secret",
+            "Labels": {
+                "env": "dev",
+                "rev": "20170324"
+            }
+        }
+    }
+]
+

Parent command

Command Description
docker secret Manage Docker secrets
Command Description
docker secret create Create a secret from a file or STDIN as content
docker secret inspect Display detailed information on one or more secrets
docker secret ls List secrets
docker secret rm Remove one or more secrets
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_inspect%2Findex.html new file mode 100644 index 00000000..d4e02194 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_inspect%2Findex.html @@ -0,0 +1,34 @@ +

docker secret inspect


Display detailed information on one or more secrets

Swarm This command works with the Swarm orchestrator.

Usage

$ docker secret inspect [OPTIONS] SECRET [SECRET...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Inspects the specified secret.

By default, this renders all results in a JSON array. If a format is specified, the given template will be executed for each result.

Go’s text/template package describes all the details of the format.

For detailed information about using secrets, refer to manage sensitive data with Docker secrets.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--format , -f + Format the output using the given Go template
--pretty Print the information in a human friendly format

Examples

Inspect a secret by name or ID

You can inspect a secret either by its name or its ID.

For example, given the following secret:

$ docker secret ls
+
+ID                          NAME                CREATED             UPDATED
+eo7jnzguqgtpdah3cm5srfb97   my_secret           3 minutes ago       3 minutes ago
+
$ docker secret inspect my_secret
+

The output is in JSON format, for example:

[
+  {
+    "ID": "eo7jnzguqgtpdah3cm5srfb97",
+    "Version": {
+      "Index": 17
+    },
+    "CreatedAt": "2017-03-24T08:15:09.735271783Z",
+    "UpdatedAt": "2017-03-24T08:15:09.735271783Z",
+    "Spec": {
+      "Name": "my_secret",
+      "Labels": {
+        "env": "dev",
+        "rev": "20170324"
+      }
+    }
+  }
+]
+

Formatting

You can use the --format option to obtain specific information about a secret. The following example command outputs the creation time of the secret.

$ docker secret inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97
+
+2017-03-24 08:15:09.735271783 +0000 UTC
+

Parent command

Command Description
docker secret Manage Docker secrets
Command Description
docker secret create Create a secret from a file or STDIN as content
docker secret inspect Display detailed information on one or more secrets
docker secret ls List secrets
docker secret rm Remove one or more secrets
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_ls%2Findex.html new file mode 100644 index 00000000..d6441381 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_ls%2Findex.html @@ -0,0 +1,48 @@ +

docker secret ls


List secrets

Swarm This command works with the Swarm orchestrator.

Usage

$ docker secret ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Run this command on a manager node to list the secrets in the swarm.

For detailed information about using secrets, refer to manage sensitive data with Docker secrets.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Filter output based on conditions provided
--format Pretty-print secrets using a Go template
+--quiet , -q + Only display IDs

Examples

$ docker secret ls
+
+ID                          NAME                        CREATED             UPDATED
+6697bflskwj1998km1gnnjr38   q5s5570vtvnimefos1fyeo2u2   6 weeks ago         6 weeks ago
+9u9hk4br2ej0wgngkga6rp4hq   my_secret                   5 weeks ago         5 weeks ago
+mem02h8n73mybpgqjf0kfi1n0   test_secret                 3 seconds ago       3 seconds ago
+

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz")

The currently supported filters are:

id

The id filter matches all or a prefix of a secret’s ID.

$ docker secret ls -f "id=6697bflskwj1998km1gnnjr38"
+
+ID                          NAME                        CREATED             UPDATED
+6697bflskwj1998km1gnnjr38   q5s5570vtvnimefos1fyeo2u2   6 weeks ago         6 weeks ago
+

label

The label filter matches secrets based on the presence of a label alone or a label and a value.

The following filter matches all secrets with a project label regardless of its value:

$ docker secret ls --filter label=project
+
+ID                          NAME                        CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_secret                 About an hour ago   About an hour ago
+

The following filter matches only secrets whose project label has the value test.

$ docker secret ls --filter label=project=test
+
+ID                          NAME                        CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_secret                 About an hour ago   About an hour ago
+

name

The name filter matches all or a prefix of a secret’s name.

The following filter matches secrets whose name begins with test.

$ docker secret ls --filter name=test_secret
+
+ID                          NAME                        CREATED             UPDATED
+mem02h8n73mybpgqjf0kfi1n0   test_secret                 About an hour ago   About an hour ago
+

Format the output

The formatting option (--format) pretty prints secrets output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Secret ID
.Name Secret name
.CreatedAt Time when the secret was created
.UpdatedAt Time when the secret was updated
.Labels All labels assigned to the secret
.Label Value of a specific label for this secret. For example {{.Label "secret.ssh.key"}} +

When using the --format option, the secret ls command will either output the data exactly as the template declares or, when using the table directive, will include column headers as well.

The following example uses a template without headers and outputs the ID and Name entries separated by a colon (:) for all secrets:

$ docker secret ls --format "{{.ID}}: {{.Name}}"
+
+77af4d6b9913: secret-1
+b6fa739cedf5: secret-2
+78a85c484f71: secret-3
+

To list all secrets with their name and created date in a table format you can use:

$ docker secret ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}"
+
+ID                  NAME                      CREATED
+77af4d6b9913        secret-1                  5 minutes ago
+b6fa739cedf5        secret-2                  3 hours ago
+78a85c484f71        secret-3                  10 days ago
+

Parent command

Command Description
docker secret Manage Docker secrets
Command Description
docker secret create Create a secret from a file or STDIN as content
docker secret inspect Display detailed information on one or more secrets
docker secret ls List secrets
docker secret rm Remove one or more secrets
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_rm%2Findex.html new file mode 100644 index 00000000..c64a9309 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsecret_rm%2Findex.html @@ -0,0 +1,9 @@ +

docker secret rm


Remove one or more secrets

Swarm This command works with the Swarm orchestrator.

Usage

$ docker secret rm SECRET [SECRET...]
+

Description

Removes the specified secrets from the swarm.

For detailed information about using secrets, refer to manage sensitive data with Docker secrets.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Examples

This example removes a secret:

$ docker secret rm secret.json
+sapth4csdo5b6wz2p5uimh5xg
+

Warning

Unlike docker rm, this command does not ask for confirmation before removing a secret.

Parent command

Command Description
docker secret Manage Docker secrets
Command Description
docker secret create Create a secret from a file or STDIN as content
docker secret inspect Display detailed information on one or more secrets
docker secret ls List secrets
docker secret rm Remove one or more secrets
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice%2Findex.html new file mode 100644 index 00000000..981f91ad --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice%2Findex.html @@ -0,0 +1,7 @@ +

docker service


Manage services

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service COMMAND
+

Description

Manage services.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

Child commands

Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/service/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_create%2Findex.html new file mode 100644 index 00000000..c9a18312 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_create%2Findex.html @@ -0,0 +1,229 @@ +

docker service create


Create a new service

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service create [OPTIONS] IMAGE [COMMAND] [ARG...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a service as described by the specified parameters.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--cap-add +API 1.41+
Add Linux capabilities
--cap-drop +API 1.41+
Drop Linux capabilities
--config +API 1.41+
Specify configurations to expose to the service
--constraint Placement constraints
--container-label Container labels
--credential-spec Credential spec for managed service account (Windows only)
+--detach , -d + Exit immediately instead of waiting for the service to converge
--dns Set custom DNS servers
--dns-option Set DNS options
--dns-search Set custom DNS search domains
--endpoint-mode vip Endpoint mode (vip or dnsrr)
--entrypoint Overwrite the default ENTRYPOINT of the image
+--env , -e + Set environment variables
--env-file Read in a file of environment variables
--generic-resource User defined resources
--group Set one or more supplementary user groups for the container
--health-cmd Command to run to check health
--health-interval Time between running the check (ms|s|m|h)
--health-retries Consecutive failures needed to report unhealthy
--health-start-period Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)
--health-timeout Maximum time to allow one check to run (ms|s|m|h)
--host Set one or more custom host-to-IP mappings (host:ip)
--hostname Container hostname
--init Use an init inside each service container to forward signals and reap processes
--isolation Service container isolation mode
+--label , -l + Service labels
--limit-cpu Limit CPUs
--limit-memory Limit Memory
--limit-pids +API 1.41+Swarm
Limit maximum number of processes (default 0 = unlimited)
--log-driver Logging driver for service
--log-opt Logging driver options
--max-concurrent +API 1.41+
Number of job tasks to run concurrently (default equal to --replicas)
--mode replicated Service mode (replicated, global, replicated-job, or global-job)
--mount Attach a filesystem mount to the service
--name Service name
--network Network attachments
--no-healthcheck Disable any container-specified HEALTHCHECK
--no-resolve-image Do not query the registry to resolve image digest and supported platforms
--placement-pref Add a placement preference
+--publish , -p + Publish a port as a node port
+--quiet , -q + Suppress progress output
--read-only Mount the container's root filesystem as read only
--replicas Number of tasks
--replicas-max-per-node +API 1.40+
Maximum number of tasks per node (default 0 = unlimited)
--reserve-cpu Reserve CPUs
--reserve-memory Reserve Memory
--restart-condition Restart when condition is met ("none"|"on-failure"|"any") (default "any")
--restart-delay Delay between restart attempts (ns|us|ms|s|m|h) (default 5s)
--restart-max-attempts Maximum number of restarts before giving up
--restart-window Window used to evaluate the restart policy (ns|us|ms|s|m|h)
--rollback-delay Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)
--rollback-failure-action Action on rollback failure ("pause"|"continue") (default "pause")
--rollback-max-failure-ratio Failure rate to tolerate during a rollback (default 0)
--rollback-monitor Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s)
--rollback-order Rollback order ("start-first"|"stop-first") (default "stop-first")
--rollback-parallelism 1 Maximum number of tasks rolled back simultaneously (0 to roll back all at once)
--secret Specify secrets to expose to the service
--stop-grace-period Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s)
--stop-signal Signal to stop the container
--sysctl +API 1.40+
Sysctl options
+--tty , -t + +API 1.40+
Allocate a pseudo-TTY
--ulimit +API 1.41+
Ulimit options
--update-delay Delay between updates (ns|us|ms|s|m|h) (default 0s)
--update-failure-action Action on update failure ("pause"|"continue"|"rollback") (default "pause")
--update-max-failure-ratio Failure rate to tolerate during an update (default 0)
--update-monitor Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 5s)
--update-order Update order ("start-first"|"stop-first") (default "stop-first")
--update-parallelism 1 Maximum number of tasks updated simultaneously (0 to update all at once)
+--user , -u + Username or UID (format: <name|uid>[:<group|gid>])
--with-registry-auth Send registry authentication details to swarm agents
+--workdir , -w + Working directory inside the container

Examples

Create a service

$ docker service create --name redis redis:3.0.6
+
+dmu1ept4cxcfe8k8lhtux3ro3
+
+$ docker service create --mode global --name redis2 redis:3.0.6
+
+a8q9dasaafudfs8q8w32udass
+
+$ docker service ls
+
+ID            NAME    MODE        REPLICAS  IMAGE
+dmu1ept4cxcf  redis   replicated  1/1       redis:3.0.6
+a8q9dasaafud  redis2  global      1/1       redis:3.0.6
+

Create a service using an image on a private registry

If your image is available on a private registry which requires login, use the --with-registry-auth flag with docker service create, after logging in. If your image is stored on registry.example.com, which is a private registry, use a command like the following:

$ docker login registry.example.com
+
+$ docker service  create \
+  --with-registry-auth \
+  --name my_service \
+  registry.example.com/acme/my_image:latest
+

This passes the login token from your local client to the swarm nodes where the service is deployed, using the encrypted WAL logs. With this information, the nodes are able to log into the registry and pull the image.

Create a service with 5 replica tasks (--replicas)

Use the --replicas flag to set the number of replica tasks for a replicated service. The following command creates a redis service with 5 replica tasks:

$ docker service create --name redis --replicas=5 redis:3.0.6
+
+4cdgfyky7ozwh3htjfw0d12qv
+

The above command sets the desired number of tasks for the service. Even though the command returns immediately, actual scaling of the service may take some time. The REPLICAS column shows both the actual and desired number of replica tasks for the service.

In the following example the desired state is 5 replicas, but the current number of RUNNING tasks is 3:

$ docker service ls
+
+ID            NAME   MODE        REPLICAS  IMAGE
+4cdgfyky7ozw  redis  replicated  3/5       redis:3.0.7
+

Once all the tasks are created and RUNNING, the actual number of tasks is equal to the desired number:

$ docker service ls
+
+ID            NAME   MODE        REPLICAS  IMAGE
+4cdgfyky7ozw  redis  replicated  5/5       redis:3.0.7
+

Create a service with secrets

Use the --secret flag to give a container access to a secret.

Create a service specifying a secret:

$ docker service create --name redis --secret secret.json redis:3.0.6
+
+4cdgfyky7ozwh3htjfw0d12qv
+

Create a service specifying the secret, target, user/group ID, and mode:

$ docker service create --name redis \
+    --secret source=ssh-key,target=ssh \
+    --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \
+    redis:3.0.6
+
+4cdgfyky7ozwh3htjfw0d12qv
+

To grant a service access to multiple secrets, use multiple --secret flags.

Secrets are located in /run/secrets in the container if no target is specified. If no target is specified, the name of the secret is used as the name of the in-memory file in the container. If a target is specified, that is used as the filename. In the example above, two files are created: /run/secrets/ssh and /run/secrets/app, one for each of the secret targets specified.

Create a service with configs

Use the --config flag to give a container access to a config.

Create a service with a config. The config will be mounted into redis-config, be owned by the user who runs the command inside the container (often root), and have file mode 0444 or world-readable. You can specify the uid and gid as numerical IDs or names. When using names, the provided group/user names must pre-exist in the container. The mode is specified as a 4-number sequence such as 0755.

$ docker service create --name=redis --config redis-conf redis:3.0.6
+

Create a service with a config and specify the target location and file mode:

$ docker service create --name redis \
+  --config source=redis-conf,target=/etc/redis/redis.conf,mode=0400 redis:3.0.6
+

To grant a service access to multiple configs, use multiple --config flags.

Configs are located in / in the container if no target is specified. If no target is specified, the name of the config is used as the name of the file in the container. If a target is specified, that is used as the filename.
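
As a minimal sketch, assuming two configs named redis-conf and app-conf already exist in the swarm, the following grants a service access to both, mounting the second at a custom path with a restricted mode:

$ docker service create --name redis \
    --config redis-conf \
    --config source=app-conf,target=/etc/app/app.conf,mode=0440 \
    redis:3.0.6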

Create a service with a rolling update policy

$ docker service create \
+  --replicas 10 \
+  --name redis \
+  --update-delay 10s \
+  --update-parallelism 2 \
+  redis:3.0.6
+

When you run a service update, the scheduler updates a maximum of 2 tasks at a time, with 10s between updates. For more information, refer to the rolling updates tutorial.

Set environment variables (-e, --env)

This sets an environment variable for all tasks in a service. For example:

$ docker service create \
+  --name redis_2 \
+  --replicas 5 \
+  --env MYVAR=foo \
+  redis:3.0.6
+

To specify multiple environment variables, specify multiple --env flags, each with a separate key-value pair.

$ docker service create \
+  --name redis_2 \
+  --replicas 5 \
+  --env MYVAR=foo \
+  --env MYVAR2=bar \
+  redis:3.0.6
+

Create a service with specific hostname (--hostname)

This option sets the hostname of the service's containers to a specific string. For example:

$ docker service create --name redis --hostname myredis redis:3.0.6
+

Set metadata on a service (-l, --label)

A label is a key=value pair that applies metadata to a service. To label a service with two labels:

$ docker service create \
+  --name redis_2 \
+  --label com.example.foo="bar" \
+  --label bar=baz \
+  redis:3.0.6
+

For more information about labels, refer to apply custom metadata.

Add bind mounts, volumes or memory filesystems

Docker supports several kinds of mounts, which allow containers to read from or write to files or directories, either on the host operating system or in memory. These types are data volumes (often referred to simply as volumes), bind mounts, tmpfs mounts, and named pipes.

A bind mount makes a file or directory on the host available to the container it is mounted within. A bind mount may be either read-only or read-write. For example, a container might share its host’s DNS information by means of a bind mount of the host’s /etc/resolv.conf or a container might write logs to its host’s /var/log/myContainerLogs directory. If you use bind mounts and your host and containers have different notions of permissions, access controls, or other such details, you will run into portability issues.

A named volume is a mechanism for decoupling persistent data needed by your container from the image used to create the container and from the host machine. Named volumes are created and managed by Docker, and a named volume persists even when no container is currently using it. Data in named volumes can be shared between a container and the host machine, as well as between multiple containers. Docker uses a volume driver to create, manage, and mount volumes. You can back up or restore volumes using Docker commands.

A tmpfs mounts a tmpfs inside a container for volatile data.

A npipe mounts a named pipe from the host into the container.

Consider a situation where your image starts a lightweight web server. You could use that image as a base image, copy in your website’s HTML files, and package that into another image. Each time your website changed, you’d need to update the new image and redeploy all of the containers serving your website. A better solution is to store the website in a named volume which is attached to each of your web server containers when they start. To update the website, you just update the named volume.

For more information about named volumes, see Data Volumes.

The following table describes options which apply to both bind mounts and named volumes in a service:

Option Required Description
type

The type of mount, which can be either volume, bind, tmpfs, or npipe. Defaults to volume if no type is specified.

  • +volume: mounts a managed volume into the container.
  • +bind: bind-mounts a directory or file from the host into the container.
  • +tmpfs: mount a tmpfs in the container
  • +npipe: mounts named pipe from the host into the container (Windows containers only).
src or source (required for type=bind and type=npipe)
  • type=volume: src is an optional way to specify the name of the volume (for example, src=my-volume). If the named volume does not exist, it is automatically created. If no src is specified, the volume is assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. A randomly-named volume has the same lifecycle as its container and is destroyed when the container is destroyed (which is upon service update, or when scaling or re-balancing the service)
  • type=bind: src is required, and specifies an absolute path to the file or directory to bind-mount (for example, src=/path/on/host/). An error is produced if the file or directory does not exist.
  • type=tmpfs: src is not supported.

dst or destination or target

yes

Mount path inside the container, for example /some/path/in/container/. If the path does not exist in the container's filesystem, the Engine creates a directory at the specified location before mounting the volume or bind mount.

readonly or ro

The Engine mounts binds and volumes read-write unless the readonly option is given when mounting the bind or volume. Note that setting readonly for a bind mount does not make its submounts read-only on the current Linux implementation. See also bind-nonrecursive, and the sketch after this list.

  • +true or 1 or no value: Mounts the bind or volume read-only.
  • +false or 0: Mounts the bind or volume read-write.
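
A minimal sketch, assuming the host directory /path/on/host exists, that bind-mounts it read-only into the service's containers:

$ docker service create --name my-service \
    --mount type=bind,source=/path/on/host,destination=/path/in/container,readonly \
    nginx:alpine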

Options for Bind Mounts

The following options can only be used for bind mounts (type=bind):

Option Description
bind-propagation

See the bind propagation section.

consistency

The consistency requirements for the mount; one of

  • +default: Equivalent to consistent.
  • +consistent: Full consistency. The container runtime and the host maintain an identical view of the mount at all times.
  • +cached: The host's view of the mount is authoritative. There may be delays before updates made on the host are visible within a container.
  • +delegated: The container runtime's view of the mount is authoritative. There may be delays before updates made in a container are visible on the host.
bind-nonrecursive By default, submounts are recursively bind-mounted as well. However, this behavior can be confusing when a bind mount is configured with readonly option, because submounts are not mounted as read-only. Set bind-nonrecursive to disable recursive bind-mount.

A value is optional:

  • +true or 1: Disables recursive bind-mount.
  • +false or 0: Default if you do not provide a value. Enables recursive bind-mount.
Bind propagation

Bind propagation refers to whether or not mounts created within a given bind mount or named volume can be propagated to replicas of that mount. Consider a mount point /mnt, which is also mounted on /tmp. The propagation settings control whether a mount on /tmp/a would also be available on /mnt/a. Each propagation setting has a recursive counterpart. In the case of recursion, consider that /tmp/a is also mounted as /foo. The propagation settings control whether /mnt/a and/or /tmp/a would exist.

The bind-propagation option defaults to rprivate for both bind mounts and volume mounts, and is only configurable for bind mounts. In other words, named volumes do not support bind propagation.

For more information about bind propagation, see the Linux kernel documentation for shared subtree.
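
For illustration, assuming a host path /mnt/shared that is itself a shared mount, a bind mount with an explicit propagation setting might look like the following; the path and the rshared value are only examples:

$ docker service create --name my-service \
    --mount type=bind,source=/mnt/shared,destination=/srv/shared,bind-propagation=rshared \
    nginx:alpine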

Options for named volumes

The following options can only be used for named volumes (type=volume):

Option Description
volume-driver

Name of the volume-driver plugin to use for the volume. Defaults to "local", to use the local volume driver to create the volume if the volume does not exist.

volume-label One or more custom metadata ("labels") to apply to the volume upon creation. For example, volume-label=mylabel=hello-world,my-other-label=hello-mars. For more information about labels, refer to apply custom metadata.
volume-nocopy By default, if you attach an empty volume to a container, and files or directories already existed at the mount-path in the container (dst), the Engine copies those files and directories into the volume, allowing the host to access them. Set volume-nocopy to disable copying files from the container's filesystem to the volume and mount the empty volume.

A value is optional:

  • +true or 1: Default if you do not provide a value. Disables copying.
  • +false or 0: Enables copying.
volume-opt Options specific to a given volume driver, which will be passed to the driver when creating the volume. Options are provided as a comma-separated list of key/value pairs, for example, volume-opt=some-option=some-value,volume-opt=some-other-option=some-other-value. For available options for a given driver, refer to that driver's documentation.
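
A sketch combining several of these options; the volume name, label, and nocopy setting are illustrative:

$ docker service create --name my-service \
    --mount type=volume,source=my-volume,destination=/data,volume-label=stage=prod,volume-nocopy=true \
    nginx:alpine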

Options for tmpfs

The following options can only be used for tmpfs mounts (type=tmpfs):

Option Description
tmpfs-size Size of the tmpfs mount in bytes. Unlimited by default in Linux.
tmpfs-mode File mode of the tmpfs in octal. (e.g. "700" or "0700".) Defaults to "1777" in Linux.
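
A sketch of a tmpfs mount using these options; the 100MB size (given in bytes) and the mode are illustrative values:

$ docker service create --name my-service \
    --mount type=tmpfs,destination=/tmp/scratch,tmpfs-size=104857600,tmpfs-mode=1770 \
    nginx:alpine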

Differences between “--mount” and “--volume”

The --mount flag supports most options that are supported by the -v or --volume flag for docker run, with some important exceptions.

Create a service using a named volume

The following example creates a service that uses a named volume:

$ docker service create \
+  --name my-service \
+  --replicas 3 \
+  --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \
+  nginx:alpine
+

For each replica of the service, the engine requests a volume named “my-volume” from the default (“local”) volume driver where the task is deployed. If the volume does not exist, the engine creates a new volume and applies the “color” and “shape” labels.

When the task is started, the volume is mounted on /path/in/container/ inside the container.

Be aware that the default (“local”) volume is a locally scoped volume driver. This means that depending on where a task is deployed, either that task gets a new volume named “my-volume”, or shares the same “my-volume” with other tasks of the same service. Multiple containers writing to a single shared volume can cause data corruption if the software running inside the container is not designed to handle concurrent processes writing to the same location. Also take into account that containers can be re-scheduled by the Swarm orchestrator and be deployed on a different node.

Create a service that uses an anonymous volume

The following command creates a service with three replicas with an anonymous volume on /path/in/container:

$ docker service create \
+  --name my-service \
+  --replicas 3 \
+  --mount type=volume,destination=/path/in/container \
+  nginx:alpine
+

In this example, no name (source) is specified for the volume, so a new volume is created for each task. This guarantees that each task gets its own volume, and volumes are not shared between tasks. Anonymous volumes are removed after the task using them is complete.

Create a service that uses a bind-mounted host directory

The following example bind-mounts a host directory at /path/in/container in the containers backing the service:

$ docker service create \
+  --name my-service \
+  --mount type=bind,source=/path/on/host,destination=/path/in/container \
+  nginx:alpine
+

Set service mode (--mode)

The service mode determines whether this is a replicated service or a global service. A replicated service runs as many tasks as specified, while a global service runs on each active node in the swarm.

The following command creates a global service:

$ docker service create \
+ --name redis_2 \
+ --mode global \
+ redis:3.0.6
+

Specify service constraints (--constraint)

You can limit the set of nodes where a task can be scheduled by defining constraint expressions. Constraint expressions can either use a match (==) or exclude (!=) rule. Multiple constraints find nodes that satisfy every expression (AND match). Constraints can match node or Docker Engine labels as follows:

node attribute matches example
node.id Node ID node.id==2ivku8v2gvtg4
node.hostname Node hostname node.hostname!=node-2
node.role Node role (manager/worker) node.role==manager
node.platform.os Node operating system node.platform.os==windows
node.platform.arch Node architecture node.platform.arch==x86_64
node.labels User-defined node labels node.labels.security==high
engine.labels Docker Engine’s labels engine.labels.operatingsystem==ubuntu-14.04

engine.labels apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add node.labels for operational purposes by using the docker node update command.

For example, the following limits tasks for the redis service to nodes where the node type label equals queue:

$ docker service create \
+  --name redis_2 \
+  --constraint node.platform.os==linux \
+  --constraint node.labels.type==queue \
+  redis:3.0.6
+

If the service constraints exclude all nodes in the cluster, a message is printed that no suitable node is found, but the scheduler will start a reconciliation loop and deploy the service once a suitable node becomes available.

In the example below, no node satisfying the constraint was found, causing the service to not reconcile with the desired state:

$ docker service create \
+  --name web \
+  --constraint node.labels.region==east \
+  nginx:alpine
+
+lx1wrhhpmbbu0wuk0ybws30bc
+overall progress: 0 out of 1 tasks
+1/1: no suitable node (scheduling constraints not satisfied on 5 nodes)
+
+$ docker service ls
+ID                  NAME     MODE         REPLICAS   IMAGE               PORTS
+b6lww17hrr4e        web      replicated   0/1        nginx:alpine
+

After adding the region=east label to a node in the cluster, the service reconciles, and the desired number of replicas are deployed:

$ docker node update --label-add region=east yswe2dm4c5fdgtsrli1e8ya5l
+yswe2dm4c5fdgtsrli1e8ya5l
+
+$ docker service ls
+ID                  NAME     MODE         REPLICAS   IMAGE               PORTS
+b6lww17hrr4e        web      replicated   1/1        nginx:alpine
+

Specify service placement preferences (--placement-pref)

You can set up the service to divide tasks evenly over different categories of nodes. One example of where this can be useful is to balance tasks over a set of datacenters or availability zones. The example below illustrates this:

$ docker service create \
+  --replicas 9 \
+  --name redis_2 \
+  --placement-pref spread=node.labels.datacenter \
+  redis:3.0.6
+

This uses --placement-pref with a spread strategy (currently the only supported strategy) to spread tasks evenly over the values of the datacenter node label. In this example, we assume that every node has a datacenter node label attached to it. If there are three different values of this label among nodes in the swarm, one third of the tasks will be placed on the nodes associated with each value. This is true even if there are more nodes with one value than another. For example, consider a set of nodes where three are labeled datacenter=east, two are labeled datacenter=south, and one is labeled datacenter=west.

Since we are spreading over the values of the datacenter label and the service has 9 replicas, 3 replicas will end up in each datacenter. There are three nodes associated with the value east, so each one will get one of the three replicas reserved for this value. There are two nodes with the value south, and the three replicas for this value will be divided between them, with one receiving two replicas and another receiving just one. Finally, west has a single node that will get all three replicas reserved for west.

If the nodes in one category (for example, those with node.labels.datacenter=south) can’t handle their fair share of tasks due to constraints or resource limitations, the extra tasks will be assigned to other nodes instead, if possible.

Both engine labels and node labels are supported by placement preferences. The example above uses a node label, because the label is referenced with node.labels.datacenter. To spread over the values of an engine label, use --placement-pref spread=engine.labels.<labelname>.
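
For example, a sketch that spreads tasks over the values of the engine's operatingsystem label instead of a node label (the label name is only an example):

$ docker service create \
    --replicas 9 \
    --name redis_2 \
    --placement-pref spread=engine.labels.operatingsystem \
    redis:3.0.6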

It is possible to add multiple placement preferences to a service. This establishes a hierarchy of preferences, so that tasks are first divided over one category, and then further divided over additional categories. One example of where this may be useful is dividing tasks fairly between datacenters, and then splitting the tasks within each datacenter over a choice of racks. To add multiple placement preferences, specify the --placement-pref flag multiple times. The order is significant, and the placement preferences will be applied in the order given when making scheduling decisions.

The following example sets up a service with multiple placement preferences. Tasks are spread first over the various datacenters, and then over racks (as indicated by the respective labels):

$ docker service create \
+  --replicas 9 \
+  --name redis_2 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  --placement-pref 'spread=node.labels.rack' \
+  redis:3.0.6
+

When updating a service with docker service update, --placement-pref-add appends a new placement preference after all existing placement preferences. --placement-pref-rm removes an existing placement preference that matches the argument.

Specify memory requirements and constraints for a service (--reserve-memory and --limit-memory)

If your service needs a minimum amount of memory in order to run correctly, you can use --reserve-memory to specify that the service should only be scheduled on a node with this much memory available to reserve. If no node is available that meets the criteria, the task is not scheduled, but remains in a pending state.

The following example requires that 4GB of memory be available and reservable on a given node before scheduling the service to run on that node.

$ docker service create --reserve-memory=4GB --name=too-big nginx:alpine
+

The managers won’t schedule a set of containers on a single node whose combined reservations exceed the memory available on that node.

After a task is scheduled and running, --reserve-memory does not enforce a memory limit. Use --limit-memory to ensure that a task uses no more than a given amount of memory on a node. This example limits the amount of memory used by the task to 4GB. The task will be scheduled even if each of your nodes has only 2GB of memory, because --limit-memory is an upper limit.

$ docker service create --limit-memory=4GB --name=too-big nginx:alpine
+

Using --reserve-memory and --limit-memory does not guarantee that Docker will not use more memory on your host than you want. For instance, you could create many services, the sum of whose memory usage could exhaust the available memory.

You can prevent this scenario from exhausting the available memory by taking into account other (non-containerized) software running on the host as well. If --reserve-memory is greater than or equal to --limit-memory, Docker won’t schedule a service on a host that doesn’t have enough memory. --limit-memory will limit the service’s memory to stay within that limit, so if every service has a memory-reservation and limit set, Docker services will be less likely to saturate the host. Other non-service containers or applications running directly on the Docker host could still exhaust memory.
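
As a sketch of combining the two flags (the values shown are illustrative, not recommendations):

$ docker service create \
    --name my-service \
    --reserve-memory=512MB \
    --limit-memory=1GB \
    nginx:alpine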

There is a downside to this approach. Reserving memory also means that you may not make optimum use of the memory available on the node. Consider a service that under normal circumstances uses 100MB of memory, but depending on load can “peak” at 500MB. Reserving 500MB for that service (to guarantee it can have 500MB for those “peaks”) results in 400MB of memory being wasted most of the time.

In short, you can take a more conservative or a more flexible approach.

The approach you take depends heavily on the memory-usage patterns of your workloads. You should test under normal and peak conditions before settling on an approach.

On Linux, you can also limit a service’s overall memory footprint on a given host at the level of the host operating system, using cgroups or other relevant operating system tools.

Specify maximum replicas per node (--replicas-max-per-node)

Use the --replicas-max-per-node flag to set the maximum number of replica tasks that can run on a node. The following command creates an nginx service with 2 replica tasks but only one replica task per node.

One example where this can be useful is balancing tasks over a set of datacenters together with --placement-pref, while letting the --replicas-max-per-node setting ensure that replicas are not migrated to another datacenter during maintenance or a datacenter failure.

The example below illustrates this:

$ docker service create \
+  --name nginx \
+  --replicas 2 \
+  --replicas-max-per-node 1 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  nginx
+

Attach a service to an existing network (--network)

You can use overlay networks to connect one or more services within the swarm.

First, create an overlay network on a manager node using the docker network create command:

$ docker network create --driver overlay my-network
+
+etjpu59cykrptrgw0z0hk5snf
+

After you create an overlay network in swarm mode, all manager nodes have access to the network.

When you create a service, pass the --network flag to attach the service to the overlay network:

$ docker service create \
+  --replicas 3 \
+  --network my-network \
+  --name my-web \
+  nginx
+
+716thylsndqma81j6kkkb5aus
+

The swarm extends my-network to each node running the service.

Containers on the same network can access each other using service discovery.

The long form syntax of --network allows you to specify a list of aliases and driver options: --network name=my-network,alias=web1,driver-opt=field1=value1
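
A minimal sketch using this long form, attaching the service to my-network under the additional alias web1:

$ docker service create \
    --replicas 3 \
    --network name=my-network,alias=web1 \
    --name my-web \
    nginx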

Publish service ports externally to the swarm (-p, --publish)

You can publish service ports to make them available externally to the swarm using the --publish flag. The --publish flag can take two different styles of arguments. The short version is positional, and allows you to specify the published port and target port separated by a colon (:).

$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx
+

There is also a long format, which is easier to read and allows you to specify more options. The long format is preferred. You cannot specify the service’s mode when using the short format. Here is an example of using the long format for the same service as above:

$ docker service create --name my_web --replicas 3 --publish published=8080,target=80 nginx
+

The options you can specify are:

Option Short syntax Long syntax Description
published and target port --publish 8080:80 --publish published=8080,target=80

The target port within the container and the port to map it to on the nodes, using the routing mesh (ingress) or host-level networking. More options are available, later in this table. The key-value syntax is preferred, because it is somewhat self-documenting.

mode Not possible to set using short syntax. --publish published=8080,target=80,mode=host

The mode to use for binding the port, either ingress or host. Defaults to ingress to use the routing mesh.

protocol --publish 8080:80/tcp --publish published=8080,target=80,protocol=tcp

The protocol to use, tcp , udp, or sctp. Defaults to tcp. To bind a port for both protocols, specify the -p or --publish flag twice.

When you publish a service port using ingress mode, the swarm routing mesh makes the service accessible at the published port on every node, regardless of whether there is a task for the service running on that node. If you use host mode, the port is only bound on nodes where the service is running, and a given port on a node can only be bound once. You can only set the publication mode using the long syntax. For more information refer to Use swarm mode routing mesh.
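
As a sketch, assuming a DNS image such as coredns/coredns, the following publishes the same port for both TCP and UDP and binds it in host mode rather than through the routing mesh:

$ docker service create --name dns-cache \
    --publish published=53,target=53,protocol=tcp,mode=host \
    --publish published=53,target=53,protocol=udp,mode=host \
    coredns/coredns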

Provide credential specs for managed service accounts (Windows only)

This option is only used for services using Windows containers. The --credential-spec must be in the format file://<filename> or registry://<value-name>.

When using the file://<filename> format, the referenced file must be present in the CredentialSpecs subdirectory in the docker data directory, which defaults to C:\ProgramData\Docker\ on Windows. For example, specifying file://spec.json loads C:\ProgramData\Docker\CredentialSpecs\spec.json.

When using the registry://<value-name> format, the credential spec is read from the Windows registry on the daemon’s host. The specified registry value must be located in:

HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs
+
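
A sketch using the file:// format, assuming a credential spec saved as C:\ProgramData\Docker\CredentialSpecs\spec.json on each Windows node; the service name is illustrative:

$ docker service create \
    --name my-windows-service \
    --credential-spec file://spec.json \
    microsoft/nanoserver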

Create services using templates

You can use templates for some flags of service create, using the syntax provided by Go’s text/template package.

The supported flags are the following: --hostname, --mount, and --env.

Valid placeholders for the Go template are listed below:

Placeholder Description
.Service.ID Service ID
.Service.Name Service name
.Service.Labels Service labels
.Node.ID Node ID
.Node.Hostname Node Hostname
.Task.ID Task ID
.Task.Name Task name
.Task.Slot Task slot

Template example

In this example, we are going to set the template of the created containers based on the service’s name and the ID and hostname of the node where it runs.

$ docker service create \
+    --name hosttempl \
+    --hostname="{{.Node.Hostname}}-{{.Node.ID}}-{{.Service.Name}}"\
+    busybox top
+
+va8ew30grofhjoychbr6iot8c
+
+$ docker service ps va8ew30grofhjoychbr6iot8c
+
+ID            NAME         IMAGE                                                                                   NODE          DESIRED STATE  CURRENT STATE               ERROR  PORTS
+wo41w8hg8qan  hosttempl.1  busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912  2e7a8a9c4da2  Running        Running about a minute ago
+
+$ docker inspect --format="{{.Config.Hostname}}" 2e7a8a9c4da2-wo41w8hg8qanxwjwsg4kxpprj-hosttempl
+
+x3ti0erg11rjpg64m75kej2mz-hosttempl
+

Specify isolation mode (Windows)

By default, tasks scheduled on Windows nodes are run using the default isolation mode configured for this particular node. To force a specific isolation mode, you can use the --isolation flag:

$ docker service create --name myservice --isolation=process microsoft/nanoserver
+

Supported isolation modes on Windows are:

  • default: Use the default isolation mode configured on the daemon’s host.
  • process: Use process isolation.
  • hyperv: Use Hyper-V isolation.

Create services requesting Generic Resources

You can narrow the kind of nodes your task can land on by using the --generic-resource flag (if the nodes advertise these resources):

$ docker service create \
+    --name cuda \
+    --generic-resource "NVIDIA-GPU=2" \
+    --generic-resource "SSD=1" \
+    nvidia/cuda
+

Running as a job

Jobs are a special kind of service designed to run an operation to completion and then stop, as opposed to running long-running daemons. When a Task belonging to a job exits successfully (return value 0), the Task is marked as “Completed”, and is not run again.

Jobs are started by using one of two modes, replicated-job or global-job:

$ docker service create --name myjob \
+                        --mode replicated-job \
+                        bash "true"
+

This command will run one Task which, using the bash image, executes the command true, returns 0, and then exits.

Though Jobs are ultimately a different kind of service, they have a couple of caveats compared to other services:

Jobs are available in both replicated and global modes.

Replicated Jobs

A replicated job is like a replicated service. Setting the --replicas flag specifies the total number of iterations of the job to execute.

By default, all replicas of a replicated job will launch at once. To control the total number of replicas that are executing simultaneously at any one time, the --max-concurrent flag can be used:

$ docker service create \
+    --name mythrottledjob \
+    --mode replicated-job \
+    --replicas 10 \
+    --max-concurrent 2 \
+    bash "true"
+

The above command will execute 10 Tasks in total, but only 2 of them will be run at any given time.

Global Jobs

Global jobs are like global services, in that a Task is executed once on each node matching placement constraints. Global jobs are represented by the mode global-job.

Note that after a Global job is created, any new Nodes added to the cluster will have a Task from that job started on them. The Global Job does not as a whole have a “done” state, except insofar as every Node meeting the job’s constraints has a Completed task.

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/service_create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_inspect%2Findex.html new file mode 100644 index 00000000..d11560a2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_inspect%2Findex.html @@ -0,0 +1,89 @@ +

docker service inspect


Display detailed information on one or more services

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service inspect [OPTIONS] SERVICE [SERVICE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Inspects the specified service.

By default, this renders all results in a JSON array. If a format is specified, the given template will be executed for each result.

Go’s text/template package describes all the details of the format.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--format , -f + Format the output using the given Go template
--pretty Print the information in a human friendly format

Examples

Inspect a service by name or ID

You can inspect a service either by its name or its ID.

For example, given the following service:

$ docker service ls
+ID            NAME   MODE        REPLICAS  IMAGE
+dmu1ept4cxcf  redis  replicated  3/3       redis:3.0.6
+

Both docker service inspect redis, and docker service inspect dmu1ept4cxcf produce the same result:

$ docker service inspect redis
+

The output is in JSON format, for example:

[
+  {
+    "ID": "dmu1ept4cxcfe8k8lhtux3ro3",
+    "Version": {
+      "Index": 12
+    },
+    "CreatedAt": "2016-06-17T18:44:02.558012087Z",
+    "UpdatedAt": "2016-06-17T18:44:02.558012087Z",
+    "Spec": {
+      "Name": "redis",
+      "TaskTemplate": {
+        "ContainerSpec": {
+          "Image": "redis:3.0.6"
+        },
+        "Resources": {
+          "Limits": {},
+          "Reservations": {}
+        },
+        "RestartPolicy": {
+          "Condition": "any",
+          "MaxAttempts": 0
+        },
+        "Placement": {}
+      },
+      "Mode": {
+        "Replicated": {
+          "Replicas": 1
+        }
+      },
+      "UpdateConfig": {},
+      "EndpointSpec": {
+        "Mode": "vip"
+      }
+    },
+    "Endpoint": {
+      "Spec": {}
+    }
+  }
+]
+
$ docker service inspect dmu1ept4cxcf
+
+[
+  {
+    "ID": "dmu1ept4cxcfe8k8lhtux3ro3",
+    "Version": {
+      "Index": 12
+    },
+    ...
+  }
+]
+

Formatting

You can print the inspect output in a human-readable format instead of the default JSON output, by using the --pretty option:

$ docker service inspect --pretty frontend
+
+ID:     c8wgl7q4ndfd52ni6qftkvnnp
+Name:   frontend
+Labels:
+ - org.example.projectname=demo-app
+Service Mode:   REPLICATED
+ Replicas:      5
+Placement:
+UpdateConfig:
+ Parallelism:   0
+ On failure:    pause
+ Max failure ratio: 0
+ContainerSpec:
+ Image:     nginx:alpine
+Resources:
+Networks:   net1
+Endpoint Mode:  vip
+Ports:
+ PublishedPort = 4443
+  Protocol = tcp
+  TargetPort = 443
+  PublishMode = ingress
+

You can also use --format pretty for the same effect.

Find the number of tasks running as part of a service

The --format option can be used to obtain specific information about a service. For example, the following command outputs the number of replicas of the “redis” service.

$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis
+
+10
+

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/service_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_logs%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_logs%2Findex.html new file mode 100644 index 00000000..13558de2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_logs%2Findex.html @@ -0,0 +1,13 @@ +

docker service logs


Fetch the logs of a service or task

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service logs [OPTIONS] SERVICE|TASK
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker service logs command batch-retrieves logs present at the time of execution.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

The docker service logs command can be used with either the name or ID of a service, or with the ID of a task. If a service is passed, it will display logs for all of the containers in that service. If a task is passed, it will only display logs from that particular task.

Note

This command is only functional for services that are started with the json-file or journald logging driver.

For more information about selecting and configuring logging drivers, refer to Configure logging drivers.

The docker service logs --follow command will continue streaming the new output from the service’s STDOUT and STDERR.

Passing a negative number or a non-integer to --tail is invalid and the value is set to all in that case.

The docker service logs --timestamps command will add an RFC3339Nano timestamp, for example 2014-09-16T06:17:46.000000000Z, to each log entry. To ensure that the timestamps are aligned, the nanosecond part of the timestamp will be padded with zeros when necessary.

The docker service logs --details command will add on extra attributes, such as environment variables and labels, provided to --log-opt when creating the service.

The --since option shows only the service logs generated after a given date. You can specify the date as an RFC 3339 date, a UNIX timestamp, or a Go duration string (e.g. 1m30s, 3h). Besides RFC3339 date format you may also use RFC3339Nano, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the client will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. You can combine the --since option with either or both of the --follow or --tail options.
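
For example, a sketch that shows the last 42 minutes of logs for a service named my-web (an assumed name) and keeps following new output:

$ docker service logs --since 42m --follow my-web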

Options

Name, shorthand Default Description
--details Show extra details provided to logs
+--follow , -f + Follow log output
--no-resolve Do not map IDs to Names in output
--no-task-ids Do not include task IDs in output
--no-trunc Do not truncate output
--raw Do not neatly format logs
--since Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or relative (e.g. 42m for 42 minutes)
+--tail , -n + all Number of lines to show from the end of the logs
+--timestamps , -t + Show timestamps

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/service_logs/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ls%2Findex.html new file mode 100644 index 00000000..aa0431c8 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ls%2Findex.html @@ -0,0 +1,40 @@ +

docker service ls


List services

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

This command lists the services that are running in the swarm.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Filter output based on conditions provided
--format Pretty-print services using a Go template
+--quiet , -q + Only display IDs

Examples

On a manager node:

$ docker service ls
+
+ID            NAME      MODE            REPLICAS             IMAGE
+c8wgl7q4ndfd  frontend  replicated      5/5                  nginx:alpine
+dmu1ept4cxcf  redis     replicated      3/3                  redis:3.0.6
+iwe3278osahj  mongo     global          7/7                  mongo:3.3
+hh08h9uu8uwr  job       replicated-job  1/1 (3/5 completed)  nginx:latest
+

The REPLICAS column shows both the actual and desired number of tasks for the service. If the service is in replicated-job or global-job, it will additionally show the completion status of the job as completed tasks over total tasks the job will execute.

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

id

The id filter matches all or part of a service’s id.

$ docker service ls -f "id=0bcjw"
+ID            NAME   MODE        REPLICAS  IMAGE
+0bcjwfh8ychr  redis  replicated  1/1       redis:3.0.6
+

label

The label filter matches services based on the presence of a label alone or a label and a value.

The following filter matches all services with a project label regardless of its value:

$ docker service ls --filter label=project
+ID            NAME       MODE        REPLICAS  IMAGE
+01sl1rp6nj5u  frontend2  replicated  1/1       nginx:alpine
+36xvvwwauej0  frontend   replicated  5/5       nginx:alpine
+74nzcxxjv6fq  backend    replicated  3/3       redis:3.0.6
+

The following filter matches only services with the project label with the project-a value.

$ docker service ls --filter label=project=project-a
+ID            NAME      MODE        REPLICAS  IMAGE
+36xvvwwauej0  frontend  replicated  5/5       nginx:alpine
+74nzcxxjv6fq  backend   replicated  3/3       redis:3.0.6
+

mode

The mode filter matches on the mode (either replicated or global) of a service.

The following filter matches only global services.

$ docker service ls --filter mode=global
+ID                  NAME                MODE                REPLICAS            IMAGE
+w7y0v2yrn620        top                 global              1/1                 busybox
+

name

The name filter matches on all or part of a service’s name.

The following filter matches services with a name containing redis.

$ docker service ls --filter name=redis
+ID            NAME   MODE        REPLICAS  IMAGE
+0bcjwfh8ychr  redis  replicated  1/1       redis:3.0.6
+

Formatting

The formatting option (--format) pretty-prints the services output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Service ID
.Name Service name
.Mode Service mode (replicated, global)
.Replicas Service replicas
.Image Service image
.Ports Service ports published in ingress mode

When using the --format option, the service ls command will either output the data exactly as the template declares or, when using the table directive, include column headers as well.

The following example uses a template without headers and outputs the ID, Mode, and Replicas entries separated by a colon (:) for all services:

$ docker service ls --format "{{.ID}}: {{.Mode}} {{.Replicas}}"
+
+0zmvwuiu3vue: replicated 10/10
+fm6uf97exkul: global 5/5
+
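
To include column headers, a sketch using the table directive; the rows shown mirror the services from the earlier listing:

$ docker service ls --format "table {{.Name}}\t{{.Mode}}\t{{.Replicas}}"

NAME       MODE        REPLICAS
frontend   replicated  5/5
redis      replicated  3/3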

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/service_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ps%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ps%2Findex.html new file mode 100644 index 00000000..77b359ac --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_ps%2Findex.html @@ -0,0 +1,64 @@ +

docker service ps


List the tasks of one or more services

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service ps [OPTIONS] SERVICE [SERVICE...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists the tasks that are running as part of the specified services.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Filter output based on conditions provided
--format Pretty-print tasks using a Go template
--no-resolve Do not map IDs to Names
--no-trunc Do not truncate output
+--quiet , -q + Only display task IDs

Examples

List the tasks that are part of a service

The following command shows all the tasks that are part of the redis service:

$ docker service ps redis
+
+ID             NAME      IMAGE        NODE      DESIRED STATE  CURRENT STATE          ERROR  PORTS
+0qihejybwf1x   redis.1   redis:3.0.5  manager1  Running        Running 8 seconds
+bk658fpbex0d   redis.2   redis:3.0.5  worker2   Running        Running 9 seconds
+5ls5s5fldaqg   redis.3   redis:3.0.5  worker1   Running        Running 9 seconds
+8ryt076polmc   redis.4   redis:3.0.5  worker1   Running        Running 9 seconds
+1x0v8yomsncd   redis.5   redis:3.0.5  manager1  Running        Running 8 seconds
+71v7je3el7rr   redis.6   redis:3.0.5  worker2   Running        Running 9 seconds
+4l3zm9b7tfr7   redis.7   redis:3.0.5  worker2   Running        Running 9 seconds
+9tfpyixiy2i7   redis.8   redis:3.0.5  worker1   Running        Running 9 seconds
+3w1wu13yupln   redis.9   redis:3.0.5  manager1  Running        Running 8 seconds
+8eaxrb2fqpbn   redis.10  redis:3.0.5  manager1  Running        Running 8 seconds
+

In addition to running tasks, the output also shows the task history. For example, after updating the service to use the redis:3.0.6 image, the output may look like this:

$ docker service ps redis
+
+ID            NAME         IMAGE        NODE      DESIRED STATE  CURRENT STATE                   ERROR  PORTS
+50qe8lfnxaxk  redis.1      redis:3.0.6  manager1  Running        Running 6 seconds ago
+ky2re9oz86r9   \_ redis.1  redis:3.0.5  manager1  Shutdown       Shutdown 8 seconds ago
+3s46te2nzl4i  redis.2      redis:3.0.6  worker2   Running        Running less than a second ago
+nvjljf7rmor4   \_ redis.2  redis:3.0.6  worker2   Shutdown       Rejected 23 seconds ago        "No such image: redis@sha256:6…"
+vtiuz2fpc0yb   \_ redis.2  redis:3.0.5  worker2   Shutdown       Shutdown 1 second ago
+jnarweeha8x4  redis.3      redis:3.0.6  worker1   Running        Running 3 seconds ago
+vs448yca2nz4   \_ redis.3  redis:3.0.5  worker1   Shutdown       Shutdown 4 seconds ago
+jf1i992619ir  redis.4      redis:3.0.6  worker1   Running        Running 10 seconds ago
+blkttv7zs8ee   \_ redis.4  redis:3.0.5  worker1   Shutdown       Shutdown 11 seconds ago
+

The number of items in the task history is determined by the --task-history-limit option that was set when initializing the swarm. You can change the task history retention limit using the docker swarm update command.
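
For example, to keep at most five historical tasks per slot you could run something like the following (a sketch; the limit value is illustrative):

$ docker swarm update --task-history-limit 5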

When deploying a service, Docker resolves the digest for the service’s image and pins the service to that digest. The digest is not shown by default, but is printed if --no-trunc is used. The --no-trunc option also shows the non-truncated task IDs and error messages, as shown below:

$ docker service ps --no-trunc redis
+
+ID                          NAME         IMAGE                                                                                NODE      DESIRED STATE  CURRENT STATE            ERROR                                                                                           PORTS
+50qe8lfnxaxksi9w2a704wkp7   redis.1      redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  manager1  Running        Running 5 minutes ago
+ky2re9oz86r9556i2szb8a8af   \_ redis.1   redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e  worker2   Shutdown       Shutdown 5 minutes ago
+bk658fpbex0d57cqcwoe3jthu   redis.2      redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Running        Running 5 seconds
+nvjljf7rmor4htv7l8rwcx7i7   \_ redis.2   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Shutdown       Rejected 5 minutes ago   "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842"
+

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g. --filter "foo=bar" --filter "bif=baz"). Multiple filter flags are combined as an OR filter. For example, -f name=redis.1 -f name=redis.7 returns both redis.1 and redis.7 tasks.
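
As a command line, that example might look like this (a sketch against the redis service used below; output omitted):

$ docker service ps -f "name=redis.1" -f "name=redis.7" redis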

The currently supported filters are:

id

The id filter matches on all or a prefix of a task’s ID.

$ docker service ps -f "id=8" redis
+
+ID             NAME      IMAGE        NODE      DESIRED STATE  CURRENT STATE      ERROR  PORTS
+8ryt076polmc   redis.4   redis:3.0.6  worker1   Running        Running 9 seconds
+8eaxrb2fqpbn   redis.10  redis:3.0.6  manager1  Running        Running 8 seconds
+

name

The name filter matches on task names.

$ docker service ps -f "name=redis.1" redis
+
+ID            NAME     IMAGE        NODE      DESIRED STATE  CURRENT STATE      ERROR  PORTS
+qihejybwf1x5  redis.1  redis:3.0.6  manager1  Running        Running 8 seconds
+

node

The node filter matches on a node name or a node ID.

$ docker service ps -f "node=manager1" redis
+
+ID            NAME      IMAGE        NODE      DESIRED STATE  CURRENT STATE      ERROR  PORTS
+0qihejybwf1x  redis.1   redis:3.0.6  manager1  Running        Running 8 seconds
+1x0v8yomsncd  redis.5   redis:3.0.6  manager1  Running        Running 8 seconds
+3w1wu13yupln  redis.9   redis:3.0.6  manager1  Running        Running 8 seconds
+8eaxrb2fqpbn  redis.10  redis:3.0.6  manager1  Running        Running 8 seconds
+

desired-state

The desired-state filter can take the values running, shutdown, or accepted.
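
For example, to show only tasks whose desired state is running, you might run (a sketch; output omitted):

$ docker service ps -f "desired-state=running" redis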

Formatting

The formatting option (--format) pretty-prints task output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Task ID
.Name Task name
.Image Task image
.Node Node ID
.DesiredState Desired state of the task (running, shutdown, or accepted)
.CurrentState Current state of the task
.Error Error
.Ports Task published ports

When using the --format option, the service ps command either outputs the data exactly as the template declares or, when using the table directive, includes column headers as well.

The following example uses a template without headers and outputs the Name and Image entries separated by a colon (:) for all tasks:

$ docker service ps --format "{{.Name}}: {{.Image}}" top
+
+top.1: busybox
+top.2: busybox
+top.3: busybox
+
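
A complementary sketch using the table directive, which adds column headers (assuming the same top service as above):

$ docker service ps --format "table {{.Name}}\t{{.CurrentState}}" top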

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rm%2Findex.html new file mode 100644 index 00000000..bc54b7db --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rm%2Findex.html @@ -0,0 +1,14 @@ +

docker service rm


Remove one or more services

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service rm SERVICE [SERVICE...]
+

Description

Removes the specified services from the swarm.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Examples

Remove the redis service:

$ docker service rm redis
+
+redis
+
+$ docker service ls
+
+ID  NAME  MODE  REPLICAS  IMAGE
+

Warning

Unlike docker rm, this command does not ask for confirmation before removing a running service.

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rollback%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rollback%2Findex.html new file mode 100644 index 00000000..418fdadb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_rollback%2Findex.html @@ -0,0 +1,28 @@ +

docker service rollback


Revert changes to a service’s configuration

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service rollback [OPTIONS] SERVICE
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Rolls back the specified service to its previous version.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--detach , -d + Exit immediately instead of waiting for the service to converge
+--quiet , -q + Suppress progress output

Examples

Roll back to the previous version of a service

Use the docker service rollback command to roll back to the previous version of a service. After executing this command, the service is reverted to the configuration that was in place before the most recent docker service update command.

The following example creates a service with a single replica, updates the service to use three replicas, and then rolls the service back to the previous version, which has a single replica.

Create a service with a single replica:

$ docker service create --name my-service -p 8080:80 nginx:alpine
+

Confirm that the service is running with a single replica:

$ docker service ls
+
+ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
+xbw728mf6q0d        my-service          replicated          1/1                 nginx:alpine        *:8080->80/tcp
+

Update the service to use three replicas:

$ docker service update --replicas=3 my-service
+
+$ docker service ls
+
+ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
+xbw728mf6q0d        my-service          replicated          3/3                 nginx:alpine        *:8080->80/tcp
+

Now roll back the service to its previous version, and confirm it is running a single replica again:

$ docker service rollback my-service
+
+$ docker service ls
+
+ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
+xbw728mf6q0d        my-service          replicated          1/1                 nginx:alpine        *:8080->80/tcp
+

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_scale%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_scale%2Findex.html new file mode 100644 index 00000000..c3779db1 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_scale%2Findex.html @@ -0,0 +1,35 @@ +

docker service scale


Scale one or multiple replicated services

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The scale command enables you to scale one or more replicated services either up or down to the desired number of replicas. This command cannot be applied to services in global mode. The command returns immediately, but the actual scaling of the service may take some time. To stop all replicas of a service while keeping the service active in the swarm, set the scale to 0.
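
For instance, to pause all work for a service without removing it from the swarm, you might scale it to zero (a sketch, assuming a service named frontend as in the examples below):

$ docker service scale frontend=0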

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--detach , -d + Exit immediately instead of waiting for the service to converge

Examples

Scale a single service

The following command scales the “frontend” service to 50 tasks.

$ docker service scale frontend=50
+
+frontend scaled to 50
+

The following command tries to scale a global service to 10 tasks and returns an error.

$ docker service create --mode global --name backend backend:latest
+
+b4g08uwuairexjub6ome6usqh
+
+$ docker service scale backend=10
+
+backend: scale can only be used with replicated or replicated-job mode
+

Directly afterwards, run docker service ls to see the actual number of replicas.

$ docker service ls --filter name=frontend
+
+ID            NAME      MODE        REPLICAS  IMAGE
+3pr5mlvu3fh9  frontend  replicated  15/50     nginx:alpine
+

You can also scale a service using the docker service update command. The following commands are equivalent:

$ docker service scale frontend=50
+$ docker service update --replicas=50 frontend
+

Scale multiple services

The docker service scale command allows you to set the desired number of tasks for multiple services at once. The following example scales both the backend and frontend services:

$ docker service scale backend=3 frontend=5
+
+backend scaled to 3
+frontend scaled to 5
+
+$ docker service ls
+
+ID            NAME      MODE        REPLICAS  IMAGE
+3pr5mlvu3fh9  frontend  replicated  5/5       nginx:alpine
+74nzcxxjv6fq  backend   replicated  3/3       redis:3.0.6
+

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_update%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_update%2Findex.html new file mode 100644 index 00000000..ee684a4a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fservice_update%2Findex.html @@ -0,0 +1,83 @@ +

docker service update


Update a service

Swarm This command works with the Swarm orchestrator.

Usage

$ docker service update [OPTIONS] SERVICE
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Updates a service as described by the specified parameters. The parameters are the same as docker service create. Refer to the description there for further information.

Normally, updating a service will only cause the service’s tasks to be replaced with new ones if a change to the service requires recreating the tasks for it to take effect. For example, only changing the --update-parallelism setting will not recreate the tasks, because the individual tasks are not affected by this setting. However, the --force flag will cause the tasks to be recreated anyway. This can be used to perform a rolling restart without any changes to the service parameters.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--args Service command args
--cap-add +API 1.41+
Add Linux capabilities
--cap-drop +API 1.41+
Drop Linux capabilities
--config-add +API 1.41+
Add or update a config file on a service
--config-rm +API 1.41+
Remove a configuration file
--constraint-add Add or update a placement constraint
--constraint-rm Remove a constraint
--container-label-add Add or update a container label
--container-label-rm Remove a container label by its key
--credential-spec Credential spec for managed service account (Windows only)
+--detach , -d + Exit immediately instead of waiting for the service to converge
--dns-add Add or update a custom DNS server
--dns-option-add Add or update a DNS option
--dns-option-rm Remove a DNS option
--dns-rm Remove a custom DNS server
--dns-search-add Add or update a custom DNS search domain
--dns-search-rm Remove a DNS search domain
--endpoint-mode Endpoint mode (vip or dnsrr)
--entrypoint Overwrite the default ENTRYPOINT of the image
--env-add Add or update an environment variable
--env-rm Remove an environment variable
--force Force update even if no changes require it
--generic-resource-add Add a Generic resource
--generic-resource-rm Remove a Generic resource
--group-add Add an additional supplementary user group to the container
--group-rm Remove a previously added supplementary user group from the container
--health-cmd Command to run to check health
--health-interval Time between running the check (ms|s|m|h)
--health-retries Consecutive failures needed to report unhealthy
--health-start-period Start period for the container to initialize before counting retries towards unstable (ms|s|m|h)
--health-timeout Maximum time to allow one check to run (ms|s|m|h)
--host-add Add a custom host-to-IP mapping (host:ip)
--host-rm Remove a custom host-to-IP mapping (host:ip)
--hostname Container hostname
--image Service image tag
--init Use an init inside each service container to forward signals and reap processes
--isolation Service container isolation mode
--label-add Add or update a service label
--label-rm Remove a label by its key
--limit-cpu Limit CPUs
--limit-memory Limit Memory
--limit-pids +API 1.41+Swarm
Limit maximum number of processes (default 0 = unlimited)
--log-driver Logging driver for service
--log-opt Logging driver options
--max-concurrent +API 1.41+
Number of job tasks to run concurrently (default equal to --replicas)
--mount-add Add or update a mount on a service
--mount-rm Remove a mount by its target path
--network-add Add a network
--network-rm Remove a network
--no-healthcheck Disable any container-specified HEALTHCHECK
--no-resolve-image Do not query the registry to resolve image digest and supported platforms
--placement-pref-add Add a placement preference
--placement-pref-rm Remove a placement preference
--publish-add Add or update a published port
--publish-rm Remove a published port by its target port
+--quiet , -q + Suppress progress output
--read-only Mount the container's root filesystem as read only
--replicas Number of tasks
--replicas-max-per-node +API 1.40+
Maximum number of tasks per node (default 0 = unlimited)
--reserve-cpu Reserve CPUs
--reserve-memory Reserve Memory
--restart-condition Restart when condition is met ("none"|"on-failure"|"any")
--restart-delay Delay between restart attempts (ns|us|ms|s|m|h)
--restart-max-attempts Maximum number of restarts before giving up
--restart-window Window used to evaluate the restart policy (ns|us|ms|s|m|h)
--rollback Rollback to previous specification
--rollback-delay Delay between task rollbacks (ns|us|ms|s|m|h)
--rollback-failure-action Action on rollback failure ("pause"|"continue")
--rollback-max-failure-ratio Failure rate to tolerate during a rollback
--rollback-monitor Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h)
--rollback-order Rollback order ("start-first"|"stop-first")
--rollback-parallelism Maximum number of tasks rolled back simultaneously (0 to roll back all at once)
--secret-add Add or update a secret on a service
--secret-rm Remove a secret
--stop-grace-period Time to wait before force killing a container (ns|us|ms|s|m|h)
--stop-signal Signal to stop the container
--sysctl-add +API 1.40+
Add or update a Sysctl option
--sysctl-rm +API 1.40+
Remove a Sysctl option
+--tty , -t + +API 1.40+
Allocate a pseudo-TTY
--ulimit-add +API 1.41+
Add or update a ulimit option
--ulimit-rm +API 1.41+
Remove a ulimit option
--update-delay Delay between updates (ns|us|ms|s|m|h)
--update-failure-action Action on update failure ("pause"|"continue"|"rollback")
--update-max-failure-ratio Failure rate to tolerate during an update
--update-monitor Duration after each task update to monitor for failure (ns|us|ms|s|m|h)
--update-order Update order ("start-first"|"stop-first")
--update-parallelism Maximum number of tasks updated simultaneously (0 to update all at once)
+--user , -u + Username or UID (format: <name|uid>[:<group|gid>])
--with-registry-auth Send registry authentication details to swarm agents
+--workdir , -w + Working directory inside the container

Examples

Update a service

$ docker service update --limit-cpu 2 redis
+

Perform a rolling restart with no parameter changes

$ docker service update --force --update-parallelism 1 --update-delay 30s redis
+

In this example, the --force flag causes the service’s tasks to be shut down and replaced with new ones even though none of the other parameters would normally cause that to happen. The --update-parallelism 1 setting ensures that only one task is replaced at a time (this is the default behavior). The --update-delay 30s setting introduces a 30-second delay between tasks, so that the rolling restart happens gradually.

Add or remove mounts

Use the --mount-add or --mount-rm options to add or remove a service’s bind mounts or volumes.

The following example creates a service which mounts the test-data volume to /somewhere. The next step updates the service to also mount the other-volume volume to /somewhere-else. The last step unmounts the /somewhere mount point, effectively removing the test-data volume. Each command returns the service name.

$ docker service create \
+    --name=myservice \
+    --mount type=volume,source=test-data,target=/somewhere \
+    nginx:alpine
+
+myservice
+
+$ docker service update \
+    --mount-add type=volume,source=other-volume,target=/somewhere-else \
+    myservice
+
+myservice
+
+$ docker service update --mount-rm /somewhere myservice
+
+myservice
+

Add or remove published service ports

Use the --publish-add or --publish-rm flags to add or remove a published port for a service. You can use the short or long syntax discussed in the docker service create reference.

The following example adds a published service port to an existing service.

$ docker service update \
+  --publish-add published=8080,target=80 \
+  myservice
+

Add or remove network

Use the --network-add or --network-rm flags to add or remove a network for a service. You can use the short or long syntax discussed in the docker service create reference.

The following example adds a new alias name to an existing service already connected to network my-network:

$ docker service update \
+  --network-rm my-network \
+  --network-add name=my-network,alias=web1 \
+  myservice
+

Roll back to the previous version of a service

Use the --rollback option to roll back to the previous version of the service.

This will revert the service to the configuration that was in place before the most recent docker service update command.

The following example updates the number of replicas for the service from 4 to 5, and then rolls back to the previous configuration.

$ docker service update --replicas=5 web
+
+web
+
+$ docker service ls
+
+ID            NAME  MODE        REPLICAS  IMAGE
+80bvrzp6vxf3  web   replicated  0/5       nginx:alpine
+
+

Roll back the web service...

$ docker service update --rollback web
+
+web
+
+$ docker service ls
+
+ID            NAME  MODE        REPLICAS  IMAGE
+80bvrzp6vxf3  web   replicated  0/4       nginx:alpine
+
+

Other options can be combined with --rollback as well, for example, --update-delay 0s to execute the rollback without a delay between tasks:

$ docker service update \
+  --rollback \
+  --update-delay 0s \
+  web
+
+web
+
+

Services can also be set up to roll back to the previous version automatically when an update fails. To set up a service for automatic rollback, use --update-failure-action=rollback. A rollback will be triggered if the fraction of the tasks which failed to update successfully exceeds the value given with --update-max-failure-ratio.
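
A minimal sketch of enabling automatic rollback on a hypothetical web service, tolerating up to 20% failed task updates:

$ docker service update \
  --update-failure-action=rollback \
  --update-max-failure-ratio=0.2 \
  web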

The rate, parallelism, and other parameters of a rollback operation are determined by the values passed with the following flags:

--rollback-delay
--rollback-failure-action
--rollback-max-failure-ratio
--rollback-monitor
--rollback-parallelism

For example, a service set up with --update-parallelism 1 --rollback-parallelism 3 will update one task at a time during a normal update, but during a rollback, 3 tasks at a time will get rolled back. These rollback parameters are respected both during automatic rollbacks and for rollbacks initiated manually using --rollback.
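
As a command, that configuration might look like the following (a sketch; the web service name is illustrative):

$ docker service update --update-parallelism 1 --rollback-parallelism 3 web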

Add or remove secrets

Use the --secret-add or --secret-rm options to add or remove a service’s secrets.

The following example adds a secret named ssh-2 and removes ssh-1:

$ docker service update \
+    --secret-add source=ssh-2,target=ssh-2 \
+    --secret-rm ssh-1 \
+    myservice
+

Update services using templates

Some flags of service update support the use of templating. See service create for the reference.
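
For instance, mirroring the templated --hostname example from service create, an update might look like this (a sketch; the hosttempl service name is illustrative, and it assumes --hostname accepts the same template placeholders on update as on create):

$ docker service update --hostname="{{.Service.Name}}-{{.Task.Slot}}" hosttempl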

Specify isolation mode (Windows)

service update supports the same --isolation flag as service create. See service create for the reference.

Updating Jobs

When a service is created as a job, by setting its mode to replicated-job or global-job with service create, the options for updating it are limited.

Updating a job immediately stops any tasks that are in progress, creates a new set of tasks for the job, and effectively resets its completion status.

Jobs cannot be rolled out or rolled back. None of the flags for configuring update or rollback settings are valid with job modes.

To run a job again with the same parameters it was run with previously, force update it with the --force flag.
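
For example (a sketch; my-job is an illustrative job name):

$ docker service update --force my-job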

Parent command

Command Description
docker service Manage services
Command Description
docker service create Create a new service
docker service inspect Display detailed information on one or more services
docker service logs Fetch the logs of a service or task
docker service ls List services
docker service ps List the tasks of one or more services
docker service rm Remove one or more services
docker service rollback Revert changes to a service’s configuration
docker service scale Scale one or multiple replicated services
docker service update Update a service
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack%2Findex.html new file mode 100644 index 00000000..afb382fb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack%2Findex.html @@ -0,0 +1,9 @@ +

docker stack


Manage Docker stacks

Usage

$ docker stack [OPTIONS] COMMAND
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Manage stacks.

Options

Name, shorthand Default Description
--kubeconfig +deprecatedKubernetes
Kubernetes config file
--orchestrator +deprecated
Orchestrator to use (swarm|kubernetes|all)

Child commands

Command Description
docker stack deploy Deploy a new stack or update an existing stack
docker stack ls List stacks
docker stack ps List the tasks in the stack
docker stack rm Remove one or more stacks
docker stack services List the services in the stack
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_deploy%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_deploy%2Findex.html new file mode 100644 index 00000000..d82ce2ca --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_deploy%2Findex.html @@ -0,0 +1,60 @@ +

docker stack deploy


Deploy a new stack or update an existing stack

Usage

$ docker stack deploy [OPTIONS] STACK
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Create and update a stack from a compose file on the swarm.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--compose-file , -c + Path to a Compose file, or "-" to read from stdin
--namespace +deprecatedKubernetes
Kubernetes namespace to use
--prune +Swarm
Prune services that are no longer referenced
--resolve-image always +Swarm
Query the registry to resolve image digest and supported platforms ("always"|"changed"|"never")
--with-registry-auth +Swarm
Send registry authentication details to Swarm agents
--kubeconfig +deprecatedKubernetes
Kubernetes config file
--orchestrator +deprecated
Orchestrator to use (swarm|kubernetes|all)

Examples

Compose file

The deploy command supports compose file version 3.0 and above.

$ docker stack deploy --compose-file docker-compose.yml vossibility
+
+Ignoring unsupported options: links
+
+Creating network vossibility_vossibility
+Creating network vossibility_default
+Creating service vossibility_nsqd
+Creating service vossibility_logstash
+Creating service vossibility_elasticsearch
+Creating service vossibility_kibana
+Creating service vossibility_ghollector
+Creating service vossibility_lookupd
+

The Compose file can also be provided as standard input with --compose-file -:

$ cat docker-compose.yml | docker stack deploy --compose-file - vossibility
+
+Ignoring unsupported options: links
+
+Creating network vossibility_vossibility
+Creating network vossibility_default
+Creating service vossibility_nsqd
+Creating service vossibility_logstash
+Creating service vossibility_elasticsearch
+Creating service vossibility_kibana
+Creating service vossibility_ghollector
+Creating service vossibility_lookupd
+

If your configuration is split between multiple Compose files, e.g. a base configuration and environment-specific overrides, you can provide multiple --compose-file flags.

$ docker stack deploy --compose-file docker-compose.yml -c docker-compose.prod.yml vossibility
+
+Ignoring unsupported options: links
+
+Creating network vossibility_vossibility
+Creating network vossibility_default
+Creating service vossibility_nsqd
+Creating service vossibility_logstash
+Creating service vossibility_elasticsearch
+Creating service vossibility_kibana
+Creating service vossibility_ghollector
+Creating service vossibility_lookupd
+

You can verify that the services were correctly created:

$ docker service ls
+
+ID            NAME                               MODE        REPLICAS  IMAGE
+29bv0vnlm903  vossibility_lookupd                replicated  1/1       nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
+4awt47624qwh  vossibility_nsqd                   replicated  1/1       nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662
+4tjx9biia6fs  vossibility_elasticsearch          replicated  1/1       elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
+7563uuzr9eys  vossibility_kibana                 replicated  1/1       kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
+9gc5m4met4he  vossibility_logstash               replicated  1/1       logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe
+axqh55ipl40h  vossibility_vossibility-collector  replicated  1/1       icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba
+

Parent command

Command Description
docker stack Manage Docker stacks
Command Description
docker stack deploy Deploy a new stack or update an existing stack
docker stack ls List stacks
docker stack ps List the tasks in the stack
docker stack rm Remove one or more stacks
docker stack services List the services in the stack
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ls%2Findex.html new file mode 100644 index 00000000..423b37d2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ls%2Findex.html @@ -0,0 +1,19 @@ +

docker stack ls


List stacks

Usage

$ docker stack ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists the stacks.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--all-namespaces +deprecatedKubernetes
List stacks from all Kubernetes namespaces
--format Pretty-print stacks using a Go template
--namespace +deprecatedKubernetes
Kubernetes namespaces to use
--kubeconfig +deprecatedKubernetes
Kubernetes config file
--orchestrator +deprecated
Orchestrator to use (swarm|kubernetes|all)

Examples

The following command shows all stacks and some additional information:

$ docker stack ls
+
+ID                 SERVICES            ORCHESTRATOR
+myapp              2                   Kubernetes
+vossibility-stack  6                   Swarm
+

Formatting

The formatting option (--format) pretty-prints stacks using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.Name Stack name
.Services Number of services
.Orchestrator Orchestrator name
.Namespace Namespace

When using the --format option, the stack ls command either outputs the data exactly as the template declares or, when using the table directive, includes column headers as well.

The following example uses a template without headers and outputs the Name and Services entries separated by a colon (:) for all stacks:

$ docker stack ls --format "{{.Name}}: {{.Services}}"
+web-server: 1
+web-cache: 4
+

Parent command

Command Description
docker stack Manage Docker stacks
Command Description
docker stack deploy Deploy a new stack or update an existing stack
docker stack ls List stacks
docker stack ps List the tasks in the stack
docker stack rm Remove one or more stacks
docker stack services List the services in the stack
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ps%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ps%2Findex.html new file mode 100644 index 00000000..4405e8c3 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_ps%2Findex.html @@ -0,0 +1,100 @@ +

docker stack ps


List the tasks in the stack

Usage

$ docker stack ps [OPTIONS] STACK
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists the tasks that are running as part of the specified stack.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Filter output based on conditions provided
--format Pretty-print tasks using a Go template
--namespace +deprecatedKubernetes
Kubernetes namespace to use
--no-resolve Do not map IDs to Names
--no-trunc Do not truncate output
+--quiet , -q + Only display task IDs
--kubeconfig +deprecatedKubernetes
Kubernetes config file
--orchestrator +deprecated
Orchestrator to use (swarm|kubernetes|all)

Examples

List the tasks that are part of a stack

The following command shows all the tasks that are part of the voting stack:

$ docker stack ps voting
+
+ID                  NAME                  IMAGE                                          NODE   DESIRED STATE  CURRENT STATE          ERROR  PORTS
+xim5bcqtgk1b        voting_worker.1       dockersamples/examplevotingapp_worker:latest   node2  Running        Running 2 minutes ago
+q7yik0ks1in6        voting_result.1       dockersamples/examplevotingapp_result:before   node1  Running        Running 2 minutes ago
+rx5yo0866nfx        voting_vote.1         dockersamples/examplevotingapp_vote:before     node3  Running        Running 2 minutes ago
+tz6j82jnwrx7        voting_db.1           postgres:9.4                                   node1  Running        Running 2 minutes ago
+w48spazhbmxc        voting_redis.1        redis:alpine                                   node2  Running        Running 3 minutes ago
+6jj1m02freg1        voting_visualizer.1   dockersamples/visualizer:stable                node1  Running        Running 2 minutes ago
+kqgdmededccb        voting_vote.2         dockersamples/examplevotingapp_vote:before     node2  Running        Running 2 minutes ago
+t72q3z038jeh        voting_redis.2        redis:alpine                                   node3  Running        Running 3 minutes ago
+

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g. --filter "foo=bar" --filter "bif=baz"). Multiple filter flags are combined as an OR filter. For example, -f name=redis.1 -f name=redis.7 returns both redis.1 and redis.7 tasks.

The currently supported filters are:

id

The id filter matches on all or a prefix of a task’s ID.

$ docker stack ps -f "id=t" voting
+
+ID                  NAME                IMAGE               NODE         DESIRED STATE       CURRENT STATE           ERROR  PORTS
+tz6j82jnwrx7        voting_db.1         postgres:9.4        node1        Running             Running 14 minutes ago
+t72q3z038jeh        voting_redis.2      redis:alpine        node3        Running             Running 14 minutes ago
+

name

The name filter matches on task names.

$ docker stack ps -f "name=voting_redis" voting
+
+ID                  NAME                IMAGE               NODE         DESIRED STATE       CURRENT STATE           ERROR  PORTS
+w48spazhbmxc        voting_redis.1      redis:alpine        node2        Running             Running 17 minutes ago
+t72q3z038jeh        voting_redis.2      redis:alpine        node3        Running             Running 17 minutes ago
+

node

The node filter matches on a node name or a node ID.

$ docker stack ps -f "node=node1" voting
+
+ID                  NAME                  IMAGE                                          NODE   DESIRED STATE  CURRENT STATE          ERROR  PORTS
+q7yik0ks1in6        voting_result.1       dockersamples/examplevotingapp_result:before   node1  Running        Running 18 minutes ago
+tz6j82jnwrx7        voting_db.1           postgres:9.4                                   node1  Running        Running 18 minutes ago
+6jj1m02freg1        voting_visualizer.1   dockersamples/visualizer:stable                node1  Running        Running 18 minutes ago
+

desired-state

The desired-state filter can take the values running, shutdown, ready or accepted.

$ docker stack ps -f "desired-state=running" voting
+
+ID                  NAME                  IMAGE                                          NODE   DESIRED STATE  CURRENT STATE           ERROR  PORTS
+xim5bcqtgk1b        voting_worker.1       dockersamples/examplevotingapp_worker:latest   node2  Running        Running 21 minutes ago
+q7yik0ks1in6        voting_result.1       dockersamples/examplevotingapp_result:before   node1  Running        Running 21 minutes ago
+rx5yo0866nfx        voting_vote.1         dockersamples/examplevotingapp_vote:before     node3  Running        Running 21 minutes ago
+tz6j82jnwrx7        voting_db.1           postgres:9.4                                   node1  Running        Running 21 minutes ago
+w48spazhbmxc        voting_redis.1        redis:alpine                                   node2  Running        Running 21 minutes ago
+6jj1m02freg1        voting_visualizer.1   dockersamples/visualizer:stable                node1  Running        Running 21 minutes ago
+kqgdmededccb        voting_vote.2         dockersamples/examplevotingapp_vote:before     node2  Running        Running 21 minutes ago
+t72q3z038jeh        voting_redis.2        redis:alpine                                   node3  Running        Running 21 minutes ago
+

Formatting

The formatting option (--format) pretty-prints task output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Task ID
.Name Task name
.Image Task image
.Node Node ID
.DesiredState Desired state of the task (running, shutdown, or accepted)
.CurrentState Current state of the task
.Error Error
.Ports Task published ports

When using the --format option, the stack ps command either outputs the data exactly as the template declares or, when using the table directive, includes column headers as well.

The following example uses a template without headers and outputs the Name and Image entries separated by a colon (:) for all tasks:

$ docker stack ps --format "{{.Name}}: {{.Image}}" voting
+
+voting_worker.1: dockersamples/examplevotingapp_worker:latest
+voting_result.1: dockersamples/examplevotingapp_result:before
+voting_vote.1: dockersamples/examplevotingapp_vote:before
+voting_db.1: postgres:9.4
+voting_redis.1: redis:alpine
+voting_visualizer.1: dockersamples/visualizer:stable
+voting_vote.2: dockersamples/examplevotingapp_vote:before
+voting_redis.2: redis:alpine
+

Do not map IDs to Names

The --no-resolve option shows IDs for task names, without mapping IDs to names.

$ docker stack ps --no-resolve voting
+
+ID                  NAME                          IMAGE                                          NODE                        DESIRED STATE  CURRENT STATE            ERROR  PORTS
+xim5bcqtgk1b        10z9fjfqzsxnezo4hb81p8mqg.1   dockersamples/examplevotingapp_worker:latest   qaqt4nrzo775jrx6detglho01   Running        Running 30 minutes ago
+q7yik0ks1in6        hbxltua1na7mgqjnidldv5m65.1   dockersamples/examplevotingapp_result:before   mxpaef1tlh23s052erw88a4w5   Running        Running 30 minutes ago
+rx5yo0866nfx        qyprtqw1g5nrki557i974ou1d.1   dockersamples/examplevotingapp_vote:before     kanqcxfajd1r16wlnqcblobmm   Running        Running 31 minutes ago
+tz6j82jnwrx7        122f0xxngg17z52be7xspa72x.1   postgres:9.4                                   mxpaef1tlh23s052erw88a4w5   Running        Running 31 minutes ago
+w48spazhbmxc        tg61x8myx563ueo3urmn1ic6m.1   redis:alpine                                   qaqt4nrzo775jrx6detglho01   Running        Running 31 minutes ago
+6jj1m02freg1        8cqlyi444kzd3panjb7edh26v.1   dockersamples/visualizer:stable                mxpaef1tlh23s052erw88a4w5   Running        Running 31 minutes ago
+kqgdmededccb        qyprtqw1g5nrki557i974ou1d.2   dockersamples/examplevotingapp_vote:before     qaqt4nrzo775jrx6detglho01   Running        Running 31 minutes ago
+t72q3z038jeh        tg61x8myx563ueo3urmn1ic6m.2   redis:alpine                                   kanqcxfajd1r16wlnqcblobmm   Running        Running 31 minutes ago
+

Do not truncate output

When deploying a service, Docker resolves the digest for the service’s image and pins the service to that digest. The digest is not shown by default, but is printed if --no-trunc is used. The --no-trunc option also shows the non-truncated task IDs and error messages, as shown below:

$ docker stack ps --no-trunc voting
+
+ID                          NAME                  IMAGE                                                                                                                 NODE   DESIRED STATE  CURRENT STATE           ERROR  PORTS
+xim5bcqtgk1bxqz91jzo4a1s5   voting_worker.1       dockersamples/examplevotingapp_worker:latest@sha256:3e4ddf59c15f432280a2c0679c4fc5a2ee5a797023c8ef0d3baf7b1385e9fed   node2  Running        Running 32 minutes ago
+q7yik0ks1in6kv32gg6y6yjf7   voting_result.1       dockersamples/examplevotingapp_result:before@sha256:83b56996e930c292a6ae5187fda84dd6568a19d97cdb933720be15c757b7463   node1  Running        Running 32 minutes ago
+rx5yo0866nfxc58zf4irsss6n   voting_vote.1         dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6     node3  Running        Running 32 minutes ago
+tz6j82jnwrx7n2offljp3mn03   voting_db.1           postgres:9.4@sha256:6046af499eae34d2074c0b53f9a8b404716d415e4a03e68bc1d2f8064f2b027                                   node1  Running        Running 32 minutes ago
+w48spazhbmxcmbjfi54gs7x90   voting_redis.1        redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7                                   node2  Running        Running 32 minutes ago
+6jj1m02freg1n3z9n1evrzsbl   voting_visualizer.1   dockersamples/visualizer:stable@sha256:f924ad66c8e94b10baaf7bdb9cd491ef4e982a1d048a56a17e02bf5945401e5                node1  Running        Running 32 minutes ago
+kqgdmededccbhz2wuc0e9hx7g   voting_vote.2         dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6     node2  Running        Running 32 minutes ago
+t72q3z038jehe1wbh9gdum076   voting_redis.2        redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7                                   node3  Running        Running 32 minutes ago
+

Only display task IDs

The -q or --quiet option only shows IDs of the tasks in the stack. This example outputs all task IDs of the “voting” stack:

$ docker stack ps -q voting
+xim5bcqtgk1b
+q7yik0ks1in6
+rx5yo0866nfx
+tz6j82jnwrx7
+w48spazhbmxc
+6jj1m02freg1
+kqgdmededccb
+t72q3z038jeh
+

This option can be used to perform batch operations. For example, you can use the task IDs as input for other commands, such as docker inspect. The following example inspects all tasks of the “voting” stack:

$ docker inspect $(docker stack ps -q voting)
+
+[
+    {
+        "ID": "xim5bcqtgk1b1gk0krq1",
+        "Version": {
+<...>
+

Parent command

Command Description
docker stack Manage Docker stacks
Command Description
docker stack deploy Deploy a new stack or update an existing stack
docker stack ls List stacks
docker stack ps List the tasks in the stack
docker stack rm Remove one or more stacks
docker stack services List the services in the stack
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_rm%2Findex.html new file mode 100644 index 00000000..e31d003d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_rm%2Findex.html @@ -0,0 +1,32 @@ +

docker stack rm


Remove one or more stacks

Usage

$ docker stack rm [OPTIONS] STACK [STACK...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Remove the stack from the swarm.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--namespace +deprecatedKubernetes
Kubernetes namespace to use
--kubeconfig +deprecatedKubernetes
Kubernetes config file
--orchestrator +deprecated
Orchestrator to use (swarm|kubernetes|all)

Examples

Remove a stack

This will remove the stack with the name myapp. Services, networks, and secrets associated with the stack will be removed.

$ docker stack rm myapp
+
+Removing service myapp_redis
+Removing service myapp_web
+Removing service myapp_lb
+Removing network myapp_default
+Removing network myapp_frontend
+

Remove multiple stacks

This will remove all the specified stacks, myapp and vossibility. Services, networks, and secrets associated with all the specified stacks will be removed.

$ docker stack rm myapp vossibility
+
+Removing service myapp_redis
+Removing service myapp_web
+Removing service myapp_lb
+Removing network myapp_default
+Removing network myapp_frontend
+Removing service vossibility_nsqd
+Removing service vossibility_logstash
+Removing service vossibility_elasticsearch
+Removing service vossibility_kibana
+Removing service vossibility_ghollector
+Removing service vossibility_lookupd
+Removing network vossibility_default
+Removing network vossibility_vossibility
+

Parent command

Command Description
docker stack Manage Docker stacks
Command Description
docker stack deploy Deploy a new stack or update an existing stack
docker stack ls List stacks
docker stack ps List the tasks in the stack
docker stack rm Remove one or more stacks
docker stack services List the services in the stack
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_services%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_services%2Findex.html new file mode 100644 index 00000000..94548d88 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstack_services%2Findex.html @@ -0,0 +1,28 @@ +

docker stack services


List the services in the stack

Usage

$ docker stack services [OPTIONS] STACK
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Lists the services that are running as part of the specified stack.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Filter output based on conditions provided
--format Pretty-print services using a Go template
--namespace +deprecatedKubernetes
Kubernetes namespace to use
+--quiet , -q + Only display IDs
--kubeconfig +deprecatedKubernetes
Kubernetes config file
--orchestrator +deprecated
Orchestrator to use (swarm|kubernetes|all)

Examples

The following command shows all services in the myapp stack:

$ docker stack services myapp
+
+ID            NAME            REPLICAS  IMAGE                                                                          COMMAND
+7be5ei6sqeye  myapp_web       1/1       nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f
+dn7m7nhhfb9y  myapp_db        1/1       mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539
+

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If there is more than one filter, then pass multiple flags (e.g. --filter "foo=bar" --filter "bif=baz"). Multiple filter flags are combined as an OR filter.

The following command shows both the web and db services:

$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp
+
+ID            NAME            REPLICAS  IMAGE                                                                          COMMAND
+7be5ei6sqeye  myapp_web       1/1       nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f
+dn7m7nhhfb9y  myapp_db        1/1       mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539
+

The currently supported filters are:

Formatting

The formatting option (--format) pretty-prints service output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.ID Service ID
.Name Service name
.Mode Service mode (replicated, global)
.Replicas Service replicas
.Image Service image

When using the --format option, the stack services command either outputs the data exactly as the template declares or, when using the table directive, includes column headers as well.

The following example uses a template without headers and outputs the ID, Mode, and Replicas entries separated by a colon (:) for all services:

$ docker stack services --format "{{.ID}}: {{.Mode}} {{.Replicas}}"
+
+0zmvwuiu3vue: replicated 10/10
+fm6uf97exkul: global 5/5
+

Parent command

Command Description
docker stack Manage Docker stacks
Command Description
docker stack deploy Deploy a new stack or update an existing stack
docker stack ls List stacks
docker stack ps List the tasks in the stack
docker stack rm Remove one or more stacks
docker stack services List the services in the stack
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstart%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstart%2Findex.html new file mode 100644 index 00000000..79837ac8 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstart%2Findex.html @@ -0,0 +1,14 @@ +

docker start


Start one or more stopped containers

Usage

$ docker start [OPTIONS] CONTAINER [CONTAINER...]
+

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--attach , -a + Attach STDOUT/STDERR and forward signals
--checkpoint +experimental (daemon)
Restore from this checkpoint
--checkpoint-dir +experimental (daemon)
Use a custom checkpoint storage directory
--detach-keys Override the key sequence for detaching a container
+--interactive , -i + Attach container's STDIN

Examples

$ docker start my_container
+
+
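For a container that was created with an attached terminal, the --attach and --interactive options listed above can be combined when starting it; a sketch using the same example container:

$ docker start --attach --interactive my_container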


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstats%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstats%2Findex.html new file mode 100644 index 00000000..16b8cd99 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstats%2Findex.html @@ -0,0 +1,62 @@ +

docker stats


Display a live stream of container(s) resource usage statistics

Usage

$ docker stats [OPTIONS] [CONTAINER...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker stats command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data.

If you need more detailed information about a container’s resource usage, use the /containers/(id)/stats API endpoint.
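For instance, assuming the daemon listens on the default Unix socket, a single non-streaming sample can be requested from that endpoint with curl; <container-id> is a placeholder:

$ curl --unix-socket /var/run/docker.sock "http://localhost/containers/<container-id>/stats?stream=false"   # <container-id> is a placeholder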

Note

On Linux, the Docker CLI reports memory usage by subtracting cache usage from the total memory usage. The API does not perform such a calculation but rather provides the total memory usage and the amount from the cache so that clients can use the data as needed. The cache usage is defined as the value of total_inactive_file field in the memory.stat file on cgroup v1 hosts.

On Docker 19.03 and older, the cache usage was defined as the value of cache field. On cgroup v2 hosts, the cache usage is defined as the value of inactive_file field.

Note

The PIDS column contains the number of processes and kernel threads created by that container. Threads is the term used by the Linux kernel. Other equivalent terms are “lightweight process” or “kernel task”. A large number in the PIDS column combined with a small number of processes (as reported by ps or top) may indicate that something in the container is creating many threads.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--all , -a  Show all containers (default shows just running)
--format  Pretty-print stats using a Go template
--no-stream Disable streaming stats and only pull the first result
--no-trunc Do not truncate output

Examples

Running docker stats on all running containers against a Linux daemon.

$ docker stats
+
+CONTAINER ID        NAME                                    CPU %               MEM USAGE / LIMIT     MEM %               NET I/O             BLOCK I/O           PIDS
+b95a83497c91        awesome_brattain                        0.28%               5.629MiB / 1.952GiB   0.28%               916B / 0B           147kB / 0B          9
+67b2525d8ad1        foobar                                  0.00%               1.727MiB / 1.952GiB   0.09%               2.48kB / 0B         4.11MB / 0B         2
+e5c383697914        test-1951.1.kay7x1lh1twk9c0oig50sd5tr   0.00%               196KiB / 1.952GiB     0.01%               71.2kB / 0B         770kB / 0B          1
+4bda148efbc0        random.1.vnc8on831idyr42slu578u3cr      0.00%               1.672MiB / 1.952GiB   0.08%               110kB / 0B          578kB / 0B          2
+

If you don’t specify a format string using --format, the following columns are shown.

Column name Description
CONTAINER ID and NAME  The ID and name of the container
CPU % and MEM %  The percentage of the host’s CPU and memory the container is using
MEM USAGE / LIMIT  The total memory the container is using, and the total amount of memory it is allowed to use
NET I/O  The amount of data the container has sent and received over its network interface
BLOCK I/O  The amount of data the container has read from and written to block devices on the host
PIDS  The number of processes or threads the container has created

Running docker stats on multiple containers by name and id against a Linux daemon.

$ docker stats awesome_brattain 67b2525d8ad1
+
+CONTAINER ID        NAME                CPU %               MEM USAGE / LIMIT     MEM %               NET I/O             BLOCK I/O           PIDS
+b95a83497c91        awesome_brattain    0.28%               5.629MiB / 1.952GiB   0.28%               916B / 0B           147kB / 0B          9
+67b2525d8ad1        foobar              0.00%               1.727MiB / 1.952GiB   0.09%               2.48kB / 0B         4.11MB / 0B         2
+

Running docker stats on a container named nginx and getting output in JSON format.

$ docker stats nginx --no-stream --format "{{ json . }}"
+{"BlockIO":"0B / 13.3kB","CPUPerc":"0.03%","Container":"nginx","ID":"ed37317fbf42","MemPerc":"0.24%","MemUsage":"2.352MiB / 982.5MiB","Name":"nginx","NetIO":"539kB / 606kB","PIDs":"2"}
+

Running docker stats with customized format on all (Running and Stopped) containers.

$ docker stats --all --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" fervent_panini 5acfcb1b4fd1 drunk_visvesvaraya big_heisenberg
+
+CONTAINER                CPU %               MEM USAGE / LIMIT
+fervent_panini           0.00%               56KiB / 15.57GiB
+5acfcb1b4fd1             0.07%               32.86MiB / 15.57GiB
+drunk_visvesvaraya       0.00%               0B / 0B
+big_heisenberg           0.00%               0B / 0B
+

drunk_visvesvaraya and big_heisenberg are stopped containers in the above example.

Running docker stats on all running containers against a Windows daemon.

PS E:\> docker stats
+CONTAINER ID        CPU %               PRIV WORKING SET    NET I/O             BLOCK I/O
+09d3bb5b1604        6.61%               38.21 MiB           17.1 kB / 7.73 kB   10.7 MB / 3.57 MB
+9db7aa4d986d        9.19%               38.26 MiB           15.2 kB / 7.65 kB   10.6 MB / 3.3 MB
+3f214c61ad1d        0.00%               28.64 MiB           64 kB / 6.84 kB     4.42 MB / 6.93 MB
+

Running docker stats on multiple containers by name and id against a Windows daemon.

PS E:\> docker ps -a
+CONTAINER ID        NAME                IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+3f214c61ad1d        awesome_brattain    nanoserver          "cmd"               2 minutes ago       Up 2 minutes                            big_minsky
+9db7aa4d986d        mad_wilson          windowsservercore   "cmd"               2 minutes ago       Up 2 minutes                            mad_wilson
+09d3bb5b1604        fervent_panini      windowsservercore   "cmd"               2 minutes ago       Up 2 minutes                            affectionate_easley
+
+PS E:\> docker stats 3f214c61ad1d mad_wilson
+CONTAINER ID        NAME                CPU %               PRIV WORKING SET    NET I/O             BLOCK I/O
+3f214c61ad1d        awesome_brattain    0.00%               46.25 MiB           76.3 kB / 7.92 kB   10.3 MB / 14.7 MB
+9db7aa4d986d        mad_wilson          9.59%               40.09 MiB           27.6 kB / 8.81 kB   17 MB / 20.1 MB
+

Formatting

The formatting option (--format) pretty prints container output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.Container Container name or ID (user input)
.Name Container name
.ID Container ID
.CPUPerc CPU percentage
.MemUsage Memory usage
.NetIO Network IO
.BlockIO Block IO
.MemPerc Memory percentage (Not available on Windows)
.PIDs Number of PIDs (Not available on Windows)

When using the --format option, the stats command either outputs the data exactly as the template declares or, when using the table directive, includes column headers as well.

The following example uses a template without headers and outputs the Container and CPUPerc entries separated by a colon (:) for all containers:

$ docker stats --format "{{.Container}}: {{.CPUPerc}}"
+
+09d3bb5b1604: 6.61%
+9db7aa4d986d: 9.19%
+3f214c61ad1d: 0.00%
+

To list all containers statistics with their name, CPU percentage and memory usage in a table format you can use:

$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}"
+
+CONTAINER           CPU %               PRIV WORKING SET
+1285939c1fd3        0.07%               796 KiB / 64 MiB
+9c76f7834ae2        0.07%               2.746 MiB / 64 MiB
+d1ea048f04e4        0.03%               4.583 MiB / 64 MiB
+

The default format is as follows:

On Linux:

"table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}"
+

On Windows:

"table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}"
+
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fstop%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstop%2Findex.html new file mode 100644 index 00000000..785bc9bf --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fstop%2Findex.html @@ -0,0 +1,10 @@ +

docker stop


Stop one or more running containers

Usage

$ docker stop [OPTIONS] CONTAINER [CONTAINER...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL. The first signal can be changed with the STOPSIGNAL instruction in the container’s Dockerfile, or the --stop-signal option to docker run.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--time , -t  10  Seconds to wait for stop before killing it

Examples

$ docker stop my_container
+
+
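To allow a longer grace period before SIGKILL is sent, pass the --time option described above; for example:

$ docker stop --time 30 my_container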


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm%2Findex.html new file mode 100644 index 00000000..77851c84 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm%2Findex.html @@ -0,0 +1,7 @@ +

docker swarm


Manage Swarm

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm COMMAND
+

Description

Manage the swarm.

Child commands

Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_ca%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_ca%2Findex.html new file mode 100644 index 00000000..be8867ae --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_ca%2Findex.html @@ -0,0 +1,41 @@ +

docker swarm ca


Display and rotate the root CA

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm ca [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

View or rotate the current swarm CA certificate.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--ca-cert Path to the PEM-formatted root CA certificate to use for the new cluster
--ca-key Path to the PEM-formatted root CA key to use for the new cluster
--cert-expiry 2160h0m0s Validity period for node certificates (ns|us|ms|s|m|h)
--detach , -d  Exit immediately instead of waiting for the root rotation to converge
--external-ca Specifications of one or more certificate signing endpoints
--quiet , -q  Suppress progress output
--rotate Rotate the swarm CA - if no certificate or key are provided, new ones will be generated

Examples

Run the docker swarm ca command without any options to view the current root CA certificate in PEM format.

$ docker swarm ca
+
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUJPzo67QC7g8Ebg2ansjkZ8CbmaswCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAzMTcxMDAwWhcNMzcwNDI4MTcx
+MDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABKL6/C0sihYEb935wVPRA8MqzPLn3jzou0OJRXHsCLcVExigrMdgmLCC+Va4
++sJ+SLVO1eQbvLHH8uuDdF/QOU6jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBSfUy5bjUnBAx/B0GkOBKp91XvxzjAKBggqhkjO
+PQQDAgNJADBGAiEAnbvh0puOS5R/qvy1PMHY1iksYKh2acsGLtL/jAIvO4ACIQCi
+lIwQqLkJ48SQqCjG1DBTSBsHmMSRT+6mE2My+Z3GKA==
+-----END CERTIFICATE-----
+

Pass the --rotate flag (and optionally a --ca-cert, along with a --ca-key or --external-ca parameter flag), in order to rotate the current swarm root CA.

$ docker swarm ca --rotate
+desired root digest: sha256:05da740cf2577a25224c53019e2cce99bcc5ba09664ad6bb2a9425d9ebd1b53e
+  rotated TLS certificates:  [=========================>                         ] 1/2 nodes
+  rotated CA certificates:   [>                                                  ] 0/2 nodes
+

Once the rotation is finished (all the progress bars have completed), the now-current CA certificate will be printed:

$ docker swarm ca --rotate
+desired root digest: sha256:05da740cf2577a25224c53019e2cce99bcc5ba09664ad6bb2a9425d9ebd1b53e
+  rotated TLS certificates:  [==================================================>] 2/2 nodes
+  rotated CA certificates:   [==================================================>] 2/2 nodes
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUFynG04h5Rrl4lKyA4/E65tYKg8IwCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTE2MDAxMDAwWhcNMzcwNTExMDAx
+MDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABC2DuNrIETP7C7lfiEPk39tWaaU0I2RumUP4fX4+3m+87j0DU0CsemUaaOG6
++PxHhGu2VXQ4c9pctPHgf7vWeVajQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBSEL02z6mCI3SmMDmITMr12qCRY2jAKBggqhkjO
+PQQDAgNJADBGAiEA263Eb52+825EeNQZM0AME+aoH1319Zp9/J5ijILW+6ACIQCg
+gyg5u9Iliel99l7SuMhNeLkrU7fXs+Of1nTyyM73ig==
+-----END CERTIFICATE-----
+

--rotate

Root CA Rotation is recommended if one or more of the swarm managers have been compromised, so that those managers can no longer connect to or be trusted by any other node in the cluster.

Alternately, root CA rotation can be used to give control of the swarm CA to an external CA, or to take control back from an external CA.

The --rotate flag does not require any parameters to do a rotation, but you can optionally specify a certificate and key, or a certificate and external CA URL, and those will be used instead of an automatically-generated certificate/key pair.
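For instance, a rotation that supplies its own PEM files might look like the following sketch; the file names are examples only, and the --ca-cert and --ca-key options are the ones listed above:

$ docker swarm ca --rotate --ca-cert rootCA.crt --ca-key rootCA.key   # rootCA.crt and rootCA.key are example file names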

Because the root CA key should be kept secret, if provided it will not be visible when viewing any swarm information via the CLI or API.

The root CA rotation will not be completed until all registered nodes have rotated their TLS certificates. If the rotation is not completing within a reasonable amount of time, try running docker node ls --format '{{.ID}} {{.Hostname}} {{.Status}} {{.TLSStatus}}' to see if any nodes are down or otherwise unable to rotate TLS certificates.

--detach

Initiate the root CA rotation, but do not wait for the completion of or display the progress of the rotation.

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_init%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_init%2Findex.html new file mode 100644 index 00000000..1da2a375 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_init%2Findex.html @@ -0,0 +1,29 @@ +

docker swarm init


Initialize a swarm

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm init [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Initialize a swarm. The docker engine targeted by this command becomes a manager in the newly created single-node swarm.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--advertise-addr Advertised address (format: <ip|interface>[:port])
--autolock Enable manager autolocking (requiring an unlock key to start a stopped manager)
--availability active Availability of the node ("active"|"pause"|"drain")
--cert-expiry 2160h0m0s Validity period for node certificates (ns|us|ms|s|m|h)
--data-path-addr Address or interface to use for data path traffic (format: <ip|interface>)
--data-path-port  (API 1.40+) Port number to use for data path traffic (1024 - 49151). If no value is set or is set to 0, the default port (4789) is used.
--default-addr-pool  (API 1.40+) Default address pool in CIDR format
--default-addr-pool-mask-length 24  (API 1.40+) Default address pool subnet mask length
--dispatcher-heartbeat 5s Dispatcher heartbeat period (ns|us|ms|s|m|h)
--external-ca Specifications of one or more certificate signing endpoints
--force-new-cluster Force create a new cluster from current state
--listen-addr 0.0.0.0:2377 Listen address (format: <ip|interface>[:port])
--max-snapshots Number of additional Raft snapshots to retain
--snapshot-interval 10000 Number of log entries between Raft snapshots
--task-history-limit 5 Task history retention limit

Examples

$ docker swarm init --advertise-addr 192.168.99.121
+
+Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager.
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
+    172.17.0.2:2377
+
+To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
+

docker swarm init generates two random tokens, a worker token and a manager token. When you join a new node to the swarm, the node joins as a worker or manager node based upon the token you pass to swarm join.

After you create the swarm, you can display or rotate the token using swarm join-token.

--autolock

This flag enables automatic locking of managers with an encryption key. The private keys and data stored by all managers will be protected by the encryption key printed in the output, and will not be accessible without it. Thus, it is very important to store this key in order to activate a manager after it restarts. The key can be passed to docker swarm unlock to reactivate the manager. Autolock can be disabled by running docker swarm update --autolock=false. After disabling it, the encryption key is no longer required to start the manager, and it will start up on its own without user intervention.
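A sketch of enabling autolock when the swarm is first created, using only the flag described above:

$ docker swarm init --autolock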

--cert-expiry

This flag sets the validity period for node certificates.

--dispatcher-heartbeat

This flag sets the period that nodes are told to use when reporting their health.

--external-ca

This flag sets up the swarm to use an external CA to issue node certificates. The value takes the form protocol=X,url=Y. The value for protocol specifies what protocol should be used to send signing requests to the external CA. Currently, the only supported value is cfssl. The URL specifies the endpoint where signing requests should be submitted.
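A sketch of the protocol=X,url=Y form described above; the CA endpoint URL below is an example only:

$ docker swarm init --external-ca protocol=cfssl,url=https://ca.example.com   # example CA endpoint URL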

--force-new-cluster

This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data.

--listen-addr

The node listens for inbound swarm manager traffic on this address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface’s address; for example --listen-addr eth0:2377.

Specifying a port is optional. If the value is a bare IP address or interface name, the default port 2377 will be used.

--advertise-addr

This flag specifies the address that will be advertised to other members of the swarm for API access and overlay networking. If unspecified, Docker will check if the system has a single IP address, and use that IP address with the listening port (see --listen-addr). If the system has multiple IP addresses, --advertise-addr must be specified so that the correct address is chosen for inter-manager communication and overlay networking.

It is also possible to specify a network interface to advertise that interface’s address; for example --advertise-addr eth0:2377.

Specifying a port is optional. If the value is a bare IP address or interface name, the default port 2377 will be used.
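Putting the two addressing flags together, a sketch that advertises the eth0 interface address while keeping the default listen address:

$ docker swarm init --advertise-addr eth0 --listen-addr 0.0.0.0:2377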

--data-path-addr

This flag specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is then possible to separate the container’s data traffic from the management traffic of the cluster. If unspecified, Docker will use the same IP address or interface that is used for the advertise address.

--data-path-port

This flag allows you to configure the UDP port number to use for data path traffic. The provided port number must be within the 1024 - 49151 range. If this flag is not set or is set to 0, the default port number 4789 is used. The data path port can only be configured when initializing the swarm, and applies to all nodes that join the swarm. The following example initializes a new swarm, and configures the data path port to UDP port 7777:

$ docker swarm init --data-path-port=7777
+

After the swarm is initialized, use the docker info command to verify that the port is configured:

$ docker info
+<...>
+ClusterID: 9vs5ygs0gguyyec4iqf2314c0
+Managers: 1
+Nodes: 1
+Data Path Port: 7777
+<...>
+

--default-addr-pool

This flag specifies default subnet pools for global scope networks. Format example is --default-addr-pool 30.30.0.0/16 --default-addr-pool 40.40.0.0/16

--default-addr-pool-mask-length

This flag specifies default subnet pools mask length for default-addr-pool. Format example is --default-addr-pool-mask-length 24
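Combining the two pool flags from the preceding paragraphs into a single initialization command, as a sketch using the same example values:

$ docker swarm init --default-addr-pool 30.30.0.0/16 --default-addr-pool 40.40.0.0/16 --default-addr-pool-mask-length 24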

--task-history-limit

This flag sets up the task history retention limit.

--max-snapshots

This flag sets the number of old Raft snapshots to retain in addition to the current Raft snapshots. By default, no old snapshots are retained. This option may be used for debugging, or to store old snapshots of the swarm state for disaster recovery purposes.

--snapshot-interval

This flag specifies how many log entries to allow in between Raft snapshots. Setting this to a higher number will trigger snapshots less frequently. Snapshots compact the Raft log and allow for more efficient transfer of the state to new managers. However, there is a performance cost to taking snapshots frequently.

--availability

This flag specifies the availability of the node at the time it joins the swarm. Possible availability values are active, pause, or drain.

This flag is useful in certain situations. For example, a cluster may want to have dedicated manager nodes that do not also serve as worker nodes. This could be achieved by passing --availability=drain to docker swarm init.
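A sketch of that drain case:

$ docker swarm init --availability=drain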

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join%2Findex.html new file mode 100644 index 00000000..f7f31810 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join%2Findex.html @@ -0,0 +1,22 @@ +

docker swarm join


Join a swarm as a node and/or manager

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm join [OPTIONS] HOST:PORT
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Join a node to a swarm. The node joins as a manager node or worker node based upon the token you pass with the --token flag. If you pass a manager token, the node joins as a manager. If you pass a worker token, the node joins as a worker.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--advertise-addr Advertised address (format: <ip|interface>[:port])
--availability active Availability of the node ("active"|"pause"|"drain")
--data-path-addr Address or interface to use for data path traffic (format: <ip|interface>)
--listen-addr 0.0.0.0:2377 Listen address (format: <ip|interface>[:port])
--token Token for entry into the swarm

Examples

Join a node to swarm as a manager

The example below demonstrates joining a manager node using a manager token.

$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377
+This node joined a swarm as a manager.
+
+$ docker node ls
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+dkp8vy1dq1kxleu9g4u78tlag *  manager2  Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20    manager1  Ready   Active        Leader
+

A cluster should only have 3-7 managers at most, because a majority of managers must be available for the cluster to function. Nodes that aren’t meant to participate in this management quorum should join as workers instead. Managers should be stable hosts that have static IP addresses.

Join a node to swarm as a worker

The example below demonstrates joining a worker node using a worker token.

$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377
+This node joined a swarm as a worker.
+
+$ docker node ls
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Ready   Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active        Reachable
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
+

--listen-addr value

If the node is a manager, it will listen for inbound swarm manager traffic on this address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface’s address; for example --listen-addr eth0:2377.

Specifying a port is optional. If the value is a bare IP address, or interface name, the default port 2377 will be used.

This flag is generally not necessary when joining an existing swarm.

--advertise-addr value

This flag specifies the address that will be advertised to other members of the swarm for API access. If unspecified, Docker will check if the system has a single IP address, and use that IP address with the listening port (see --listen-addr). If the system has multiple IP addresses, --advertise-addr must be specified so that the correct address is chosen for inter-manager communication and overlay networking.

It is also possible to specify a network interface to advertise that interface’s address; for example --advertise-addr eth0:2377.

Specifying a port is optional. If the value is a bare IP address, or interface name, the default port 2377 will be used.

This flag is generally not necessary when joining an existing swarm. If you’re joining new nodes through a load balancer, you should use this flag to ensure the node advertises its IP address and not the IP address of the load balancer.
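For instance, a node joining through a load balancer might pin its own public address explicitly; the advertised address below is an example and <worker-token> is a placeholder for a real join token:

$ docker swarm join --advertise-addr 203.0.113.10:2377 --token <worker-token> 192.168.99.121:2377   # 203.0.113.10 is an example address; <worker-token> is a placeholder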

--data-path-addr

This flag specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is then possible to separate the container’s data traffic from the management traffic of the cluster. If unspecified, Docker will use the same IP address or interface that is used for the advertise address.

--token string

Secret value required for nodes to join the swarm

--availability

This flag specifies the availability of the node at the time it joins the swarm. Possible availability values are active, pause, or drain.

This flag is useful in certain situations. For example, a cluster may want to have dedicated manager nodes that do not also serve as worker nodes. This could be achieved by passing --availability=drain to docker swarm join.

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join-token%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join-token%2Findex.html new file mode 100644 index 00000000..11c789c3 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_join-token%2Findex.html @@ -0,0 +1,36 @@ +

docker swarm join-token


Manage join tokens

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm join-token [OPTIONS] (worker|manager)
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Join tokens are secrets that allow a node to join the swarm. There are two different join tokens available, one for the worker role and one for the manager role. You pass the token using the --token flag when you run swarm join. Nodes use the join token only when they join the swarm.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--quiet , -q  Only display token
--rotate Rotate join token

Examples

You can view or rotate the join tokens using swarm join-token.

As a convenience, you can pass worker or manager as an argument to join-token to print the full docker swarm join command to join a new node to the swarm:

$ docker swarm join-token worker
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
+    172.17.0.2:2377
+
+$ docker swarm join-token manager
+
+To add a manager to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \
+    172.17.0.2:2377
+

Use the --rotate flag to generate a new join token for the specified role:

$ docker swarm join-token --rotate worker
+
+Successfully rotated worker join token.
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \
+    172.17.0.2:2377
+

After using --rotate, only the new token will be valid for joining with the specified role.

The -q (or --quiet) flag only prints the token:

$ docker swarm join-token -q worker
+
+SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t
+

--rotate

Because tokens allow new nodes to join the swarm, you should keep them secret. Be particularly careful with manager tokens since they allow new manager nodes to join the swarm. A rogue manager has the potential to disrupt the operation of your swarm.

Rotate your swarm’s join token if a token gets checked-in to version control, stolen, or a node is compromised. You may also want to periodically rotate the token to ensure any unknown token leaks do not allow a rogue node to join the swarm.

To rotate the join token and print the newly generated token, run docker swarm join-token --rotate and pass the role: manager or worker.

Rotating a join-token means that no new nodes will be able to join the swarm using the old token. Rotation does not affect existing nodes in the swarm because the join token is only used for authorizing new nodes joining the swarm.

--quiet

Only print the token. Do not print a complete command for joining.

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_leave%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_leave%2Findex.html new file mode 100644 index 00000000..8236b88b --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_leave%2Findex.html @@ -0,0 +1,18 @@ +

docker swarm leave


Leave the swarm

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm leave [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

When you run this command on a worker, that worker leaves the swarm.

You can use the --force option on a manager to remove it from the swarm. However, this does not reconfigure the swarm to ensure that there are enough managers to maintain a quorum in the swarm. The safe way to remove a manager from a swarm is to demote it to a worker and then direct it to leave the quorum without using --force. Only use --force in situations where the swarm will no longer be used after the manager leaves, such as in a single-node swarm.
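Put concretely, the safe sequence described above might look like the following sketch; manager2 is an example node name:

$ docker node demote manager2   # run on a remaining manager
$ docker swarm leave            # run on the demoted node itself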

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--force , -f  Force this node to leave the swarm, ignoring warnings

Examples

Consider the following swarm, as seen from the manager:

$ docker node ls
+
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+7ln70fl22uw2dvjn2ft53m3q5    worker2   Ready   Active
+dkp8vy1dq1kxleu9g4u78tlag    worker1   Ready   Active
+dvfxp4zseq4s0rih1selh0d20 *  manager1  Ready   Active        Leader
+

To remove worker2, issue the following command from worker2 itself:

$ docker swarm leave
+
+Node left the default swarm.
+

The node will still appear in the node list, and be marked as down. It no longer affects swarm operation, but a long list of down nodes can clutter the node list. To remove an inactive node from the list, use the node rm command.
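For the example above, the stale entry could then be removed from a manager like so:

$ docker node rm worker2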

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock%2Findex.html new file mode 100644 index 00000000..84cabfa2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock%2Findex.html @@ -0,0 +1,9 @@ +

docker swarm unlock


Unlock swarm

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm unlock
+

Description

Unlocks a locked manager using a user-supplied unlock key. This command must be used to reactivate a manager after its Docker daemon restarts if the autolock setting is turned on. The unlock key is printed at the time when autolock is enabled, and is also available from the docker swarm unlock-key command.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Examples

$ docker swarm unlock
+Please enter unlock key:
+

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock-key%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock-key%2Findex.html new file mode 100644 index 00000000..3bffb034 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_unlock-key%2Findex.html @@ -0,0 +1,32 @@ +

docker swarm unlock-key


Manage the unlock key

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm unlock-key [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

An unlock key is a secret key needed to unlock a manager after its Docker daemon restarts. These keys are only used when the autolock feature is enabled for the swarm.

You can view or rotate the unlock key using swarm unlock-key. To view the key, run the docker swarm unlock-key command without any arguments:

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--quiet , -q  Only display token
--rotate Rotate unlock key

Examples

$ docker swarm unlock-key
+
+To unlock a swarm manager after it restarts, run the `docker swarm unlock`
+command and provide the following key:
+
+    SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4
+
+Please remember to store this key in a password manager, since without it you
+will not be able to restart the manager.
+

Use the --rotate flag to rotate the unlock key to a new, randomly-generated key:

$ docker swarm unlock-key --rotate
+
+Successfully rotated manager unlock key.
+
+To unlock a swarm manager after it restarts, run the `docker swarm unlock`
+command and provide the following key:
+
+    SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8
+
+Please remember to store this key in a password manager, since without it you
+will not be able to restart the manager.
+

The -q (or --quiet) flag only prints the key:

$ docker swarm unlock-key -q
+
+SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8
+

--rotate

This flag rotates the unlock key, replacing it with a new randomly-generated key. The old unlock key will no longer be accepted.

--quiet

Only print the unlock key, without instructions.

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_update%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_update%2Findex.html new file mode 100644 index 00000000..152ec78d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fswarm_update%2Findex.html @@ -0,0 +1,8 @@ +

docker swarm update


Update the swarm

Swarm This command works with the Swarm orchestrator.

Usage

$ docker swarm update [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Updates a swarm with new parameter values.

Note

This is a cluster management command, and must be executed on a swarm manager node. To learn about managers and workers, refer to the Swarm mode section in the documentation.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--autolock Change manager autolocking setting (true|false)
--cert-expiry 2160h0m0s Validity period for node certificates (ns|us|ms|s|m|h)
--dispatcher-heartbeat 5s Dispatcher heartbeat period (ns|us|ms|s|m|h)
--external-ca Specifications of one or more certificate signing endpoints
--max-snapshots Number of additional Raft snapshots to retain
--snapshot-interval 10000 Number of log entries between Raft snapshots
--task-history-limit 5 Task history retention limit

Examples

$ docker swarm update --cert-expiry 720h
+
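Another common update, turning off manager autolocking (the --autolock setting listed above), follows the same form:

$ docker swarm update --autolock=false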

Parent command

Command Description
docker swarm Manage Swarm
Command Description
docker swarm ca Display and rotate the root CA
docker swarm init Initialize a swarm
docker swarm join Join a swarm as a node and/or manager
docker swarm join-token Manage join tokens
docker swarm leave Leave the swarm
docker swarm unlock Unlock swarm
docker swarm unlock-key Manage the unlock key
docker swarm update Update the swarm
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem%2Findex.html new file mode 100644 index 00000000..28b34271 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem%2Findex.html @@ -0,0 +1,7 @@ +

docker system


Manage Docker

Usage

$ docker system COMMAND
+

Description

Manage Docker.

Child commands

Command Description
docker system df Show docker disk usage
docker system events Get real time events from the server
docker system info Display system-wide information
docker system prune Remove unused data
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_df%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_df%2Findex.html new file mode 100644 index 00000000..e56ad554 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_df%2Findex.html @@ -0,0 +1,41 @@ +

docker system df


Show docker disk usage

Usage

$ docker system df [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker system df command displays information regarding the amount of disk space used by the docker daemon.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--format  Pretty-print disk usage using a Go template
--verbose , -v  Show detailed information on space usage

Examples

By default the command will just show a summary of the data used:

$ docker system df
+
+TYPE                TOTAL               ACTIVE              SIZE                RECLAIMABLE
+Images              5                   2                   16.43 MB            11.63 MB (70%)
+Containers          2                   0                   212 B               212 B (100%)
+Local Volumes       2                   1                   36 B                0 B (0%)
+

A more detailed view can be requested using the -v, --verbose flag:

$ docker system df -v
+
+Images space usage:
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE                SHARED SIZE         UNIQUE SIZE         CONTAINERS
+my-curl             latest              b2789dd875bf        6 minutes ago       11 MB               11 MB               5 B                 0
+my-jq               latest              ae67841be6d0        6 minutes ago       9.623 MB            8.991 MB            632.1 kB            0
+<none>              <none>              a0971c4015c1        6 minutes ago       11 MB               11 MB               0 B                 0
+alpine              latest              4e38e38c8ce0        9 weeks ago         4.799 MB            0 B                 4.799 MB            1
+alpine              3.3                 47cf20d8c26c        9 weeks ago         4.797 MB            4.797 MB            0 B                 1
+
+Containers space usage:
+
+CONTAINER ID        IMAGE               COMMAND             LOCAL VOLUMES       SIZE                CREATED             STATUS                      NAMES
+4a7f7eebae0f        alpine:latest       "sh"                1                   0 B                 16 minutes ago      Exited (0) 5 minutes ago    hopeful_yalow
+f98f9c2aa1ea        alpine:3.3          "sh"                1                   212 B               16 minutes ago      Exited (0) 48 seconds ago   anon-vol
+
+Local Volumes space usage:
+
+NAME                                                               LINKS               SIZE
+07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e   2                   36 B
+my-named-vol                                                       0                   0 B
+

Note

Network information is not shown because it does not consume disk space.
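As a sketch of the --format option listed above, a custom table can be requested; the placeholder names used here are assumptions and are not taken from this page:

$ docker system df --format "table {{.Type}}\t{{.TotalCount}}\t{{.Size}}\t{{.Reclaimable}}"   # placeholder names are assumed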

Parent command

Command Description
docker system Manage Docker
Command Description
docker system df Show docker disk usage
docker system events Get real time events from the server
docker system info Display system-wide information
docker system prune Remove unused data
+


+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_events%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_events%2Findex.html new file mode 100644 index 00000000..1d27fadb --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_events%2Findex.html @@ -0,0 +1,146 @@ +

docker system events


Get real time events from the server

Usage

$ docker system events [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Use docker system events to get real-time events from the server. These events differ per Docker object type.

Object types

Containers

Docker containers report the following events:

Images

Docker images report the following events:

Plugins

Docker plugins report the following events:

Volumes

Docker volumes report the following events:

Networks

Docker networks report the following events:

Daemons

Docker daemons report the following events:

Limiting, filtering, and formatting the output

Limit events by time

The --since and --until parameters can be Unix timestamps, date-formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative to the client machine’s time. If you do not provide the --since option, the command returns only new and/or live events. Supported formats for date-formatted timestamps include RFC3339Nano, RFC3339, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the client will be used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing Unix timestamps, enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (also known as Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long.

Filtering

The filtering flag (-f or --filter) format is a key=value pair. If you would like to use multiple filters, pass multiple flags (e.g. --filter "foo=bar" --filter "bif=baz").

Using the same filter multiple times is handled as an OR; for example, --filter container=588a23dac085 --filter container=a8f7720b8c22 displays events for container 588a23dac085 OR container a8f7720b8c22.

Using multiple different filters is handled as an AND; for example, --filter container=588a23dac085 --filter event=start displays events for container 588a23dac085 where the event type is start.

The currently supported filters are:

Format

If a format (--format) is specified, the given template will be executed instead of the default format. Go’s text/template package describes all the details of the format.

If a format is set to {{json .}}, the events are streamed as valid JSON Lines. For information about JSON Lines, please refer to https://jsonlines.org/ .
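For instance, combining one of the filters shown in the examples below with the {{json .}} format streams matching events as JSON Lines; a sketch:

$ docker system events --filter 'event=stop' --format '{{json .}}'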

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter , -f  Filter output based on conditions provided
--format Format the output using the given Go template
--since Show all events created since timestamp
--until Stream events until this timestamp

Examples

Basic example

You’ll need two shells for this example.

Shell 1: Listening for events:

$ docker system events
+

Shell 2: Start and Stop containers:

$ docker create --name test alpine:latest top
+$ docker start test
+$ docker stop test
+

Shell 1: (again, now showing events):

2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+

To exit the docker system events command, use CTRL+C.

Filter events by time

You can filter the output by an absolute timestamp or relative time on the host machine, using the following different time syntaxes:

$ docker system events --since 1483283804
+
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --since '2017-01-05'
+
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --since '2013-09-03T15:49:29'
+
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --since '10m'
+
+2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local)
+2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
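
The --since and --until flags can also be combined to replay a bounded window of past events; the command exits once it reaches the --until boundary. A minimal sketch (the timestamps are placeholders):

$ docker system events --since '2017-01-05T00:35:00' --until '2017-01-05T00:37:00'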

Filter events by criteria

The following commands show several different ways to filter the docker system events output.

$ docker system events --filter 'event=stop'
+
+2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)
+
+$ docker system events --filter 'image=alpine'
+
+2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
+2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
+2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
+2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)
+
+$ docker system events --filter 'container=test'
+
+2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --filter 'container=test' --filter 'container=d9cdb1525ea8'
+
+2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
+2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
+2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker system events --filter 'container=test' --filter 'event=stop'
+
+2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)
+
+$ docker system events --filter 'type=volume'
+
+2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
+2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate)
+2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local)
+2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
+
+$ docker system events --filter 'type=network'
+
+2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge)
+2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge)
+
+$ docker system events --filter 'container=container_1' --filter 'container=container_2'
+
+2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
+2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+
+$ docker system events --filter 'type=volume'
+
+2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local)
+2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate)
+2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local)
+2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local)
+
+$ docker system events --filter 'type=network'
+
+2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge)
+2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge)
+
+$ docker system events --filter 'type=plugin'
+
+2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest)
+

Format the output

$ docker system events --filter 'type=container' --format 'Type={{.Type}}  Status={{.Status}}  ID={{.ID}}'
+
+Type=container  Status=create  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=attach  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=start  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=resize  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=die  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+Type=container  Status=destroy  ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26
+

Format as JSON

$ docker system events --format '{{json .}}'
+
+{"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+{"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+{"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e..
+{"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42..
+{"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4..
+
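
Because each line is a self-contained JSON object, the stream can be piped into standard JSON tooling. A brief sketch, assuming jq is installed on the host (jq is not part of Docker):

$ docker system events --format '{{json .}}' | jq .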

Parent command

Command Description
docker system Manage Docker
Command Description
docker system df Show docker disk usage
docker system events Get real time events from the server
docker system info Display system-wide information
docker system prune Remove unused data
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/system_events/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_info%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_info%2Findex.html new file mode 100644 index 00000000..f32810d8 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_info%2Findex.html @@ -0,0 +1,9 @@ +

docker system info


Display system-wide information

Usage

$ docker system info [OPTIONS]
+

Options

Name, shorthand Default Description
--format , -f Format the output using the given Go template
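
For example, the full report can be emitted as JSON with the json template function; this is a sketch rather than an exhaustive list of template fields, which depend on the daemon version:

$ docker system info --format '{{json .}}'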

Parent command

Command Description
docker system Manage Docker
Command Description
docker system df Show docker disk usage
docker system events Get real time events from the server
docker system info Display system-wide information
docker system prune Remove unused data
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/system_info/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_prune%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_prune%2Findex.html new file mode 100644 index 00000000..1b443d95 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fsystem_prune%2Findex.html @@ -0,0 +1,72 @@ +

docker system prune


Remove unused data

Usage

$ docker system prune [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Remove all unused containers, networks, images (both dangling and unreferenced), and optionally, volumes.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--all , -a Remove all unused images, not just dangling ones
--filter Provide filter values (e.g. 'label=<key>=<value>')
--force , -f Do not prompt for confirmation
--volumes Prune volumes

Examples

$ docker system prune
+
+WARNING! This will remove:
+        - all stopped containers
+        - all networks not used by at least one container
+        - all dangling images
+        - all build cache
+Are you sure you want to continue? [y/N] y
+
+Deleted Containers:
+f44f9b81948b3919590d5f79a680d8378f1139b41952e219830a33027c80c867
+792776e68ac9d75bce4092bc1b5cc17b779bc926ab04f4185aec9bf1c0d4641f
+
+Deleted Networks:
+network1
+network2
+
+Deleted Images:
+untagged: hello-world@sha256:f3b3b28a45160805bb16542c9531888519430e9e6d6ffc09d72261b0d26ff74f
+deleted: sha256:1815c82652c03bfd8644afda26fb184f2ed891d921b20a0703b46768f9755c57
+deleted: sha256:45761469c965421a92a69cc50e92c01e0cfa94fe026cdd1233445ea00e96289a
+
+Total reclaimed space: 1.84kB
+

By default, volumes are not removed to prevent important data from being deleted if there is currently no container using the volume. Use the --volumes flag when running the command to prune volumes as well:

$ docker system prune -a --volumes
+
+WARNING! This will remove:
+        - all stopped containers
+        - all networks not used by at least one container
+        - all volumes not used by at least one container
+        - all images without at least one container associated to them
+        - all build cache
+Are you sure you want to continue? [y/N] y
+
+Deleted Containers:
+0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b
+73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d
+
+Deleted Networks:
+my-network-a
+my-network-b
+
+Deleted Volumes:
+named-vol
+
+Deleted Images:
+untagged: my-curl:latest
+deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d
+deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b
+untagged: alpine:3.3
+deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f
+untagged: alpine:latest
+deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96
+deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f
+deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab
+deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3
+untagged: my-jq:latest
+deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1
+deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f
+deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548
+
+Total reclaimed space: 13.5 MB
+

Filtering

The filtering flag (--filter) uses the format "key=value". If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are until and label.

The until filter can be a Unix timestamp, a date-formatted timestamp, or a Go duration string (e.g. 10m, 1h30m) computed relative to the daemon machine's time. Supported formats for date-formatted timestamps include RFC3339Nano, RFC3339, 2006-01-02T15:04:05, 2006-01-02T15:04:05.999999999, 2006-01-02Z07:00, and 2006-01-02. The local timezone on the daemon is used if you do not provide either a Z or a +-00:00 timezone offset at the end of the timestamp. When providing a Unix timestamp, enter seconds[.nanoseconds], where seconds is the number of seconds that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long.

The label filter accepts two formats. One is label=<key> or label=<key>=<value>, which removes containers, images, networks, and volumes with the specified labels. The other is label!=<key> or label!=<key>=<value>, which removes containers, images, networks, and volumes without the specified labels.
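
For instance, the two filters can be used to restrict what is pruned; a minimal sketch (the label name keep is a placeholder):

$ docker system prune --filter "until=24h"
$ docker system prune --filter "label!=keep"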

Parent command

Command Description
docker system Manage Docker
Command Description
docker system df Show docker disk usage
docker system events Get real time events from the server
docker system info Display system-wide information
docker system prune Remove unused data
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/system_prune/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftag%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftag%2Findex.html new file mode 100644 index 00000000..45d6247a --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftag%2Findex.html @@ -0,0 +1,11 @@ +

docker tag


Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE

Usage

$ docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]
+

Description

An image name is made up of slash-separated name components, optionally prefixed by a registry hostname. The hostname must comply with standard DNS rules, but may not contain underscores. If a hostname is present, it may optionally be followed by a port number in the format :8080. If not present, the command uses Docker’s public registry located at registry-1.docker.io by default. Name components may contain lowercase letters, digits and separators. A separator is defined as a period, one or two underscores, or one or more dashes. A name component may not start or end with a separator.

A tag name must be valid ASCII and may contain lowercase and uppercase letters, digits, underscores, periods and dashes. A tag name may not start with a period or a dash and may contain a maximum of 128 characters.
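
Putting those rules together, a fully qualified reference combines an optional registry hostname and port, slash-separated name components, and a tag. A sketch with placeholder registry and repository names:

$ docker tag 0e5574283393 registry.example.com:5000/team/httpd:version1.0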

You can group your images together using names and tags, and then upload them to Docker Hub to share them.

For example uses of this command, refer to the examples section below.

Examples

Tag an image referenced by ID

To tag a local image with ID “0e5574283393” into the “fedora” repository with “version1.0”:

$ docker tag 0e5574283393 fedora/httpd:version1.0
+

Tag an image referenced by Name

To tag a local image with name “httpd” into the “fedora” repository with “version1.0”:

$ docker tag httpd fedora/httpd:version1.0
+

Note that since no tag is specified, the alias is created for the existing local image httpd:latest.
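
In other words, the command above is equivalent to tagging the :latest version explicitly:

$ docker tag httpd:latest fedora/httpd:version1.0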

Tag an image referenced by Name and Tag

To tag a local image with name “httpd” and tag “test” into the “fedora” repository with “version1.0.test”:

$ docker tag httpd:test fedora/httpd:version1.0.test
+

Tag an image for a private repository

To push an image to a private registry and not the central Docker registry, you must tag it with the registry hostname and port (if needed).

$ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/tag/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftop%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftop%2Findex.html new file mode 100644 index 00000000..4fedeac5 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftop%2Findex.html @@ -0,0 +1,7 @@ +

docker top


Display the running processes of a container

Usage

$ docker top CONTAINER [ps OPTIONS]
+
+
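
For instance, to list the processes of a running container (my_container is a placeholder name); additional ps options from the usage line above can be appended to adjust the process listing:

$ docker top my_container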

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/top/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust%2Findex.html new file mode 100644 index 00000000..522978f3 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust%2Findex.html @@ -0,0 +1,7 @@ +

docker trust


Manage trust on Docker images

Usage

$ docker trust COMMAND
+

Child commands

Command Description
docker trust inspect Return low-level information about keys and signatures
docker trust key Manage keys for signing Docker images
docker trust revoke Remove trust for an image
docker trust sign Sign an image
docker trust signer Manage entities who can sign Docker images
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_inspect%2Findex.html new file mode 100644 index 00000000..fc6829e2 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_inspect%2Findex.html @@ -0,0 +1,359 @@ +

docker trust inspect


Return low-level information about keys and signatures

Usage

$ docker trust inspect IMAGE[:TAG] [IMAGE[:TAG]...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust inspect provides low-level JSON information on signed repositories. This includes all image tags that are signed, who signed them, and who can sign new tags.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--pretty Print the information in a human friendly format

Examples

Get low-level details about signatures for a single image tag

Use docker trust inspect to get trust information about an image. The following example prints trust information for the alpine:latest image:

$ docker trust inspect alpine:latest
+

The output is in JSON format, for example:

[
+  {
+    "Name": "alpine:latest",
+    "SignedTags": [
+      {
+        "SignedTag": "latest",
+        "Digest": "d6bfc3baf615dc9618209a8d607ba2a8103d9c8a405b3bd8741d88b4bef36478",
+        "Signers": [
+          "Repo Admin"
+        ]
+      }
+    ],
+    "Signers": [],
+    "AdministrativeKeys": [
+      {
+        "Name": "Repository",
+        "Keys": [
+            {
+                "ID": "5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd"
+            }
+        ]
+      },
+      {
+        "Name": "Root",
+        "Keys": [
+            {
+                "ID": "a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce"
+            }
+        ]
+      }
+    ]
+  }
+]
+

The SignedTags key will list the SignedTag name, its Digest, and the Signers responsible for the signature.

AdministrativeKeys will list the Repository and Root keys.
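
Since the output is plain JSON, it can be post-processed with standard tools. A sketch that extracts just the signed tag names, assuming jq is installed (jq is not part of Docker):

$ docker trust inspect alpine:latest | jq -r '.[0].SignedTags[].SignedTag'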

If signers are set up for the repository via other docker trust commands, docker trust inspect includes a Signers key:

$ docker trust inspect my-image:purple
+

The output is in JSON format, for example:

[
+  {
+    "Name": "my-image:purple",
+    "SignedTags": [
+      {
+        "SignedTag": "purple",
+        "Digest": "941d3dba358621ce3c41ef67b47cf80f701ff80cdf46b5cc86587eaebfe45557",
+        "Signers": [
+          "alice",
+          "bob",
+          "carol"
+        ]
+      }
+    ],
+    "Signers": [
+      {
+        "Name": "alice",
+        "Keys": [
+            {
+                "ID": "04dd031411ed671ae1e12f47ddc8646d98f135090b01e54c3561e843084484a3"
+            },
+            {
+                "ID": "6a11e4898a4014d400332ab0e096308c844584ff70943cdd1d6628d577f45fd8"
+            }
+        ]
+      },
+      {
+        "Name": "bob",
+        "Keys": [
+            {
+                "ID": "433e245c656ae9733cdcc504bfa560f90950104442c4528c9616daa45824ccba"
+            }
+        ]
+      },
+      {
+        "Name": "carol",
+        "Keys": [
+            {
+                "ID": "d32fa8b5ca08273a2880f455fcb318da3dc80aeae1a30610815140deef8f30d9"
+            },
+            {
+                "ID": "9a8bbec6ba2af88a5fad6047d428d17e6d05dbdd03d15b4fc8a9a0e8049cd606"
+            }
+        ]
+      }
+    ],
+    "AdministrativeKeys": [
+      {
+        "Name": "Repository",
+        "Keys": [
+            {
+                "ID": "27df2c8187e7543345c2e0bf3a1262e0bc63a72754e9a7395eac3f747ec23a44"
+            }
+        ]
+      },
+      {
+        "Name": "Root",
+        "Keys": [
+            {
+                "ID": "40b66ccc8b176be8c7d365a17f3e046d1c3494e053dd57cfeacfe2e19c4f8e8f"
+            }
+        ]
+      }
+    ]
+  }
+]
+

If the image tag is unsigned or unavailable, docker trust inspect does not display any signed tags.

$ docker trust inspect unsigned-img
+
+No signatures or cannot access unsigned-img
+

However, if other tags are signed in the same image repository, docker trust inspect reports relevant key information:

$ docker trust inspect alpine:unsigned
+

The output is in JSON format, for example:

[
+  {
+    "Name": "alpine:unsigned",
+    "Signers": [],
+    "AdministrativeKeys": [
+      {
+        "Name": "Repository",
+        "Keys": [
+          {
+            "ID": "5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd"
+          }
+        ]
+      },
+      {
+        "Name": "Root",
+        "Keys": [
+          {
+            "ID": "a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce"
+          }
+        ]
+      }
+    ]
+  }
+]
+

Get details about signatures for all image tags in a repository

If no tag is specified, docker trust inspect will report details for all signed tags in the repository:

$ docker trust inspect alpine
+

The output is in JSON format, for example:

[
+  {
+    "Name": "alpine",
+    "SignedTags": [
+      {
+        "SignedTag": "3.5",
+        "Digest": "b007a354427e1880de9cdba533e8e57382b7f2853a68a478a17d447b302c219c",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "3.6",
+        "Digest": "d6bfc3baf615dc9618209a8d607ba2a8103d9c8a405b3bd8741d88b4bef36478",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "edge",
+        "Digest": "23e7d843e63a3eee29b6b8cfcd10e23dd1ef28f47251a985606a31040bf8e096",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "latest",
+        "Digest": "d6bfc3baf615dc9618209a8d607ba2a8103d9c8a405b3bd8741d88b4bef36478",
+        "Signers": [
+          "Repo Admin"
+        ]
+      }
+    ],
+    "Signers": [],
+    "AdministrativeKeys": [
+      {
+        "Name": "Repository",
+        "Keys": [
+          {
+            "ID": "5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd"
+          }
+        ]
+      },
+      {
+        "Name": "Root",
+        "Keys": [
+          {
+            "ID": "a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce"
+          }
+        ]
+      }
+    ]
+  }
+]
+

Get details about signatures for multiple images

docker trust inspect can take multiple repositories and images as arguments, and reports the results in an ordered list:

$ docker trust inspect alpine notary
+

The output is in JSON format, for example:

[
+  {
+    "Name": "alpine",
+    "SignedTags": [
+      {
+        "SignedTag": "3.5",
+        "Digest": "b007a354427e1880de9cdba533e8e57382b7f2853a68a478a17d447b302c219c",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "3.6",
+        "Digest": "d6bfc3baf615dc9618209a8d607ba2a8103d9c8a405b3bd8741d88b4bef36478",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "edge",
+        "Digest": "23e7d843e63a3eee29b6b8cfcd10e23dd1ef28f47251a985606a31040bf8e096",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "integ-test-base",
+        "Digest": "3952dc48dcc4136ccdde37fbef7e250346538a55a0366e3fccc683336377e372",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "latest",
+        "Digest": "d6bfc3baf615dc9618209a8d607ba2a8103d9c8a405b3bd8741d88b4bef36478",
+        "Signers": [
+          "Repo Admin"
+        ]
+      }
+    ],
+    "Signers": [],
+    "AdministrativeKeys": [
+      {
+        "Name": "Repository",
+        "Keys": [
+          {
+            "ID": "5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd"
+          }
+        ]
+      },
+      {
+        "Name": "Root",
+        "Keys": [
+          {
+            "ID": "a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce"
+          }
+        ]
+      }
+    ]
+  },
+  {
+    "Name": "notary",
+    "SignedTags": [
+      {
+        "SignedTag": "server",
+        "Digest": "71f64ab718a3331dee103bc5afc6bc492914738ce37c2d2f127a8133714ecf5c",
+        "Signers": [
+          "Repo Admin"
+        ]
+      },
+      {
+        "SignedTag": "signer",
+        "Digest": "a6122d79b1e74f70b5dd933b18a6d1f99329a4728011079f06b245205f158fe8",
+        "Signers": [
+          "Repo Admin"
+        ]
+      }
+    ],
+    "Signers": [],
+    "AdministrativeKeys": [
+      {
+        "Name": "Root",
+        "Keys": [
+          {
+            "ID": "8cdcdef5bd039f4ab5a029126951b5985eebf57cabdcdc4d21f5b3be8bb4ce92"
+          }
+        ]
+      },
+      {
+        "Name": "Repository",
+        "Keys": [
+          {
+            "ID": "85bfd031017722f950d480a721f845a2944db26a3dc084040a70f1b0d9bbb3df"
+          }
+        ]
+      }
+    ]
+  }
+]
+

Formatting

You can print the inspect output in a human-readable format instead of the default JSON output by using the --pretty option:

Get details about signatures for a single image tag

$ docker trust inspect --pretty alpine:latest
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+latest              1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe   (Repo Admin)
+
+Administrative keys for alpine:latest:
+Repository Key: 5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd
+Root Key:       a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce
+

The SIGNED TAG is the signed image tag with a unique content-addressable DIGEST. SIGNERS lists all entities who have signed.

The administrative keys listed specify the root key of trust, as well as the administrative repository key. These keys are responsible for modifying signers, and rotating keys for the signed repository.

If signers are set up for the repository via other docker trust commands, docker trust inspect --pretty displays them as SIGNERS and specifies their KEYS:

$ docker trust inspect --pretty my-image:purple
+
+SIGNED TAG          DIGEST                                                              SIGNERS
+purple              941d3dba358621ce3c41ef67b47cf80f701ff80cdf46b5cc86587eaebfe45557    alice, bob, carol
+
+List of signers and their keys:
+
+SIGNER              KEYS
+alice               47caae5b3e61, a85aab9d20a4
+bob                 034370bcbd77, 82a66673242c
+carol               b6f9f8e1aab0
+
+Administrative keys for my-image:
+Repository Key: 27df2c8187e7543345c2e0bf3a1262e0bc63a72754e9a7395eac3f747ec23a44
+Root Key:       40b66ccc8b176be8c7d365a17f3e046d1c3494e053dd57cfeacfe2e19c4f8e8f
+

However, if other tags are signed in the same image repository, docker trust inspect reports relevant key information.

$ docker trust inspect --pretty alpine:unsigned
+
+No signatures for alpine:unsigned
+
+
+Administrative keys for alpine:unsigned:
+Repository Key: 5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd
+Root Key:       a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce
+

Get details about signatures for all image tags in a repository

$ docker trust inspect --pretty alpine
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+2.6                 9ace551613070689a12857d62c30ef0daa9a376107ec0fff0e34786cedb3399b   (Repo Admin)
+2.7                 9f08005dff552038f0ad2f46b8e65ff3d25641747d3912e3ea8da6785046561a   (Repo Admin)
+3.1                 d9477888b78e8c6392e0be8b2e73f8c67e2894ff9d4b8e467d1488fcceec21c8   (Repo Admin)
+3.2                 19826d59171c2eb7e90ce52bfd822993bef6a6fe3ae6bb4a49f8c1d0a01e99c7   (Repo Admin)
+3.3                 8fd4b76819e1e5baac82bd0a3d03abfe3906e034cc5ee32100d12aaaf3956dc7   (Repo Admin)
+3.4                 833ad81ace8277324f3ca8c91c02bdcf1d13988d8ecf8a3f97ecdd69d0390ce9   (Repo Admin)
+3.5                 af2a5bd2f8de8fc1ecabf1c76611cdc6a5f1ada1a2bdd7d3816e121b70300308   (Repo Admin)
+3.6                 1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe   (Repo Admin)
+edge                79d50d15bd7ea48ea00cf3dd343b0e740c1afaa8e899bee475236ef338e1b53b   (Repo Admin)
+latest              1072e499f3f655a032e88542330cf75b02e7bdf673278f701d7ba61629ee3ebe   (Repo Admin)
+
+Administrative keys for alpine:
+Repository Key: 5a46c9aaa82ff150bb7305a2d17d0c521c2d784246807b2dc611f436a69041fd
+Root Key:       a2489bcac7a79aa67b19b96c4a3bf0c675ffdf00c6d2fabe1a5df1115e80adce
+

Here’s an example with signers that are set up by docker trust commands:

$ docker trust inspect --pretty my-image
+
+SIGNED TAG          DIGEST                                                              SIGNERS
+red                 852cc04935f930a857b630edc4ed6131e91b22073bcc216698842e44f64d2943    alice
+blue                f1c38dbaeeb473c36716f6494d803fbfbe9d8a76916f7c0093f227821e378197    alice, bob
+green               cae8fedc840f90c8057e1c24637d11865743ab1e61a972c1c9da06ec2de9a139    alice, bob
+yellow              9cc65fc3126790e683d1b92f307a71f48f75fa7dd47a7b03145a123eaf0b45ba    carol
+purple              941d3dba358621ce3c41ef67b47cf80f701ff80cdf46b5cc86587eaebfe45557    alice, bob, carol
+orange              d6c271baa6d271bcc24ef1cbd65abf39123c17d2e83455bdab545a1a9093fc1c    alice
+
+List of signers and their keys for my-image:
+
+SIGNER              KEYS
+alice               47caae5b3e61, a85aab9d20a4
+bob                 034370bcbd77, 82a66673242c
+carol               b6f9f8e1aab0
+
+Administrative keys for my-image:
+Repository Key: 27df2c8187e7543345c2e0bf3a1262e0bc63a72754e9a7395eac3f747ec23a44
+Root Key:       40b66ccc8b176be8c7d365a17f3e046d1c3494e053dd57cfeacfe2e19c4f8e8f
+

Parent command

Command Description
docker trust Manage trust on Docker images
Command Description
docker trust inspect Return low-level information about keys and signatures
docker trust key Manage keys for signing Docker images
docker trust revoke Remove trust for an image
docker trust sign Sign an image
docker trust signer Manage entities who can sign Docker images
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key%2Findex.html new file mode 100644 index 00000000..630b7415 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key%2Findex.html @@ -0,0 +1,7 @@ +

docker trust key


Manage keys for signing Docker images

Usage

$ docker trust key COMMAND
+

Parent command

Command Description
docker trust Manage trust on Docker images

Child commands

Command Description
docker trust key generate Generate and load a signing key-pair
docker trust key load Load a private key file for signing
Command Description
docker trust inspect Return low-level information about keys and signatures
docker trust key Manage keys for signing Docker images
docker trust revoke Remove trust for an image
docker trust sign Sign an image
docker trust signer Manage entities who can sign Docker images
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_key/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_generate%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_generate%2Findex.html new file mode 100644 index 00000000..91ef2e04 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_generate%2Findex.html @@ -0,0 +1,23 @@ +

docker trust key generate


Generate and load a signing key-pair

Usage

$ docker trust key generate NAME
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust key generate generates a key-pair to be used with signing, and loads the private key into the local docker trust keystore.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--dir Directory to generate key in, defaults to current directory

Examples

Generate a key-pair

$ docker trust key generate alice
+
+Generating key for alice...
+Enter passphrase for new alice key with ID 17acf3c:
+Repeat passphrase for new alice key with ID 17acf3c:
+Successfully generated and loaded private key. Corresponding public key available: alice.pub
+$ ls
+alice.pub
+

The private signing key is encrypted with the passphrase and loaded into the docker trust keystore. All subsequent requests to sign with this key will refer to it by the provided NAME.

The public key component alice.pub will be available in the current working directory, and can be used directly by docker trust signer add.
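
As a sketch of the typical next step (reusing the example/trust-demo repository from the docker trust signer add examples), the generated public key can be handed straight to docker trust signer add:

$ docker trust key generate alice
$ docker trust signer add alice example/trust-demo --key alice.pub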

Provide the --dir argument to specify a directory to generate the key in:

$ docker trust key generate alice --dir /foo
+
+Generating key for alice...
+Enter passphrase for new alice key with ID 17acf3c:
+Repeat passphrase for new alice key with ID 17acf3c:
+Successfully generated and loaded private key. Corresponding public key available: alice.pub
+$ ls /foo
+alice.pub
+

Parent command

Command Description
docker trust key Manage keys for signing Docker images
Command Description
docker trust key generate Generate and load a signing key-pair
docker trust key load Load a private key file for signing
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_key_generate/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_load%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_load%2Findex.html new file mode 100644 index 00000000..fedf8062 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_key_load%2Findex.html @@ -0,0 +1,19 @@ +

docker trust key load


Load a private key file for signing

Usage

$ docker trust key load [OPTIONS] KEYFILE
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust key load adds private keys to the local docker trust keystore.

To add a signer to a repository use docker trust signer add.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--name signer Name for the loaded key

Examples

Load a single private key

For a private key alice.pem with permissions -rw-------:

$ docker trust key load alice.pem
+
+Loading key from "alice.pem"...
+Enter passphrase for new signer key with ID f8097df:
+Repeat passphrase for new signer key with ID f8097df:
+Successfully imported key from alice.pem
+

To specify a name use the --name flag:

$ docker trust key load --name alice-key alice.pem
+
+Loading key from "alice.pem"...
+Enter passphrase for new alice-key key with ID f8097df:
+Repeat passphrase for new alice-key key with ID f8097df:
+Successfully imported key from alice.pem
+

Parent command

Command Description
docker trust key Manage keys for signing Docker images
Command Description
docker trust key generate Generate and load a signing key-pair
docker trust key load Load a private key file for signing
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_key_load/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_revoke%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_revoke%2Findex.html new file mode 100644 index 00000000..2ec18cd7 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_revoke%2Findex.html @@ -0,0 +1,71 @@ +

docker trust revoke


Remove trust for an image

Usage

$ docker trust revoke [OPTIONS] IMAGE[:TAG]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust revoke removes signatures from tags in signed repositories.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--yes , -y Do not prompt for confirmation
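
For non-interactive use, the confirmation prompt can be skipped with this flag; a minimal sketch reusing the example/trust-demo repository from the examples below:

$ docker trust revoke --yes example/trust-demo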

Examples

Revoke signatures from a signed tag

Here’s an example of a repo with two signed tags:

$ docker trust inspect --pretty example/trust-demo
+SIGNED TAG          DIGEST                                                              SIGNERS
+red                 852cc04935f930a857b630edc4ed6131e91b22073bcc216698842e44f64d2943    alice
+blue                f1c38dbaeeb473c36716f6494d803fbfbe9d8a76916f7c0093f227821e378197    alice, bob
+
+List of signers and their keys for example/trust-demo:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

When alice, one of the signers, runs docker trust revoke:

$ docker trust revoke example/trust-demo:red
+Enter passphrase for delegation key with ID 27d42a8:
+Successfully deleted signature for example/trust-demo:red
+

After revocation, the tag is removed from the list of released tags:

$ docker trust inspect --pretty example/trust-demo
+SIGNED TAG          DIGEST                                                              SIGNERS
+blue                f1c38dbaeeb473c36716f6494d803fbfbe9d8a76916f7c0093f227821e378197    alice, bob
+
+List of signers and their keys for example/trust-demo:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Revoke signatures on all tags in a repository

When no tag is specified, docker trust revoke removes all signatures that you have a signing key for.

$ docker trust inspect --pretty example/trust-demo
+SIGNED TAG          DIGEST                                                              SIGNERS
+red                 852cc04935f930a857b630edc4ed6131e91b22073bcc216698842e44f64d2943    alice
+blue                f1c38dbaeeb473c36716f6494d803fbfbe9d8a76916f7c0093f227821e378197    alice, bob
+
+List of signers and their keys for example/trust-demo:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

When alice, one of the signers, runs docker trust revoke:

$ docker trust revoke example/trust-demo
+Please confirm you would like to delete all signature data for example/trust-demo? [y/N] y
+Enter passphrase for delegation key with ID 27d42a8:
+Successfully deleted signature for example/trust-demo
+

All tags that have alice’s signature on them are removed from the list of released tags:

$ docker trust inspect --pretty example/trust-demo
+
+No signatures for example/trust-demo
+
+
+List of signers and their keys for example/trust-demo:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Parent command

Command Description
docker trust Manage trust on Docker images
Command Description
docker trust inspect Return low-level information about keys and signatures
docker trust key Manage keys for signing Docker images
docker trust revoke Remove trust for an image
docker trust sign Sign an image
docker trust signer Manage entities who can sign Docker images
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_revoke/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_sign%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_sign%2Findex.html new file mode 100644 index 00000000..fd8a5b81 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_sign%2Findex.html @@ -0,0 +1,80 @@ +

docker trust sign


Sign an image

Usage

$ docker trust sign IMAGE:TAG
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust sign adds signatures to tags to create signed repositories.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--local Sign a locally tagged image
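
Per the option description above, --local signs a locally tagged image; a brief sketch reusing the example/trust-demo repository from the examples below:

$ docker trust sign --local example/trust-demo:v1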

Examples

Sign a tag as a repo admin

Given an image:

$ docker trust inspect --pretty example/trust-demo
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  c24134c079c35e698060beabe110bb83ab285d0d978de7d92fed2c8c83570a41   (Repo Admin)
+
+Administrative keys for example/trust-demo:
+Repository Key: 36d4c3601102fa7c5712a343c03b94469e5835fb27c191b529c06fd19c14a942
+Root Key:       246d360f7c53a9021ee7d4259e3c5692f3f1f7ad4737b1ea8c7b8da741ad980b
+

Sign a new tag with docker trust sign:

$ docker trust sign example/trust-demo:v2
+
+Signing and pushing trust metadata for example/trust-demo:v2
+The push refers to a repository [docker.io/example/trust-demo]
+eed4e566104a: Layer already exists
+77edfb6d1e3c: Layer already exists
+c69f806905c2: Layer already exists
+582f327616f1: Layer already exists
+a3fbb648f0bd: Layer already exists
+5eac2de68a97: Layer already exists
+8d4d1ab5ff74: Layer already exists
+v2: digest: sha256:8f6f460abf0436922df7eb06d28b3cdf733d2cac1a185456c26debbff0839c56 size: 1787
+Signing and pushing trust metadata
+Enter passphrase for repository key with ID 36d4c36:
+Successfully signed docker.io/example/trust-demo:v2
+

Use docker trust inspect --pretty to list the new signature:

$ docker trust inspect --pretty example/trust-demo
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  c24134c079c35e698060beabe110bb83ab285d0d978de7d92fed2c8c83570a41   (Repo Admin)
+v2                  8f6f460abf0436922df7eb06d28b3cdf733d2cac1a185456c26debbff0839c56   (Repo Admin)
+
+Administrative keys for example/trust-demo:
+Repository Key: 36d4c3601102fa7c5712a343c03b94469e5835fb27c191b529c06fd19c14a942
+Root Key:       246d360f7c53a9021ee7d4259e3c5692f3f1f7ad4737b1ea8c7b8da741ad980b
+

Sign a tag as a signer

Given an image:

$ docker trust inspect --pretty example/trust-demo
+
+No signatures for example/trust-demo
+
+
+List of signers and their keys for example/trust-demo:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Sign a new tag with docker trust sign:

$ docker trust sign example/trust-demo:v1
+
+Signing and pushing trust metadata for example/trust-demo:v1
+The push refers to a repository [docker.io/example/trust-demo]
+26b126eb8632: Layer already exists
+220d34b5f6c9: Layer already exists
+8a5132998025: Layer already exists
+aca233ed29c3: Layer already exists
+e5d2f035d7a4: Layer already exists
+v1: digest: sha256:74d4bfa917d55d53c7df3d2ab20a8d926874d61c3da5ef6de15dd2654fc467c4 size: 1357
+Signing and pushing trust metadata
+Enter passphrase for delegation key with ID 27d42a8:
+Successfully signed docker.io/example/trust-demo:v1
+

docker trust inspect --pretty lists the new signature:

$ docker trust inspect --pretty example/trust-demo
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  74d4bfa917d55d53c7df3d2ab20a8d926874d61c3da5ef6de15dd2654fc467c4   alice
+
+List of signers and their keys for example/trust-demo:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Parent command

Command Description
docker trust Manage trust on Docker images
Command Description
docker trust inspect Return low-level information about keys and signatures
docker trust key Manage keys for signing Docker images
docker trust revoke Remove trust for an image
docker trust sign Sign an image
docker trust signer Manage entities who can sign Docker images
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_sign/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer%2Findex.html new file mode 100644 index 00000000..30c6e0bf --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer%2Findex.html @@ -0,0 +1,7 @@ +

docker trust signer


Manage entities who can sign Docker images

Usage

$ docker trust signer COMMAND
+

Parent command

Command Description
docker trust Manage trust on Docker images

Child commands

Command Description
docker trust signer add Add a signer
docker trust signer remove Remove a signer
Command Description
docker trust inspect Return low-level information about keys and signatures
docker trust key Manage keys for signing Docker images
docker trust revoke Remove trust for an image
docker trust sign Sign an image
docker trust signer Manage entities who can sign Docker images
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_signer/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_add%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_add%2Findex.html new file mode 100644 index 00000000..603576f6 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_add%2Findex.html @@ -0,0 +1,38 @@ +

docker trust signer add


Add a signer

Usage

$ docker trust signer add OPTIONS NAME REPOSITORY [REPOSITORY...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust signer add adds signers to signed repositories.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--key Path to the signer's public key file

Examples

Add a signer to a repo

To add a new signer, alice, to this repository:

$ docker trust inspect --pretty example/trust-demo
+
+No signatures for example/trust-demo
+
+
+List of signers and their keys:
+
+SIGNER              KEYS
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: 642692c14c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Add alice with docker trust signer add:

$ docker trust signer add alice example/trust-demo --key alice.crt
+  Adding signer "alice" to example/trust-demo...
+  Enter passphrase for repository key with ID 642692c:
+Successfully added signer: alice to example/trust-demo
+

docker trust inspect --pretty now lists alice as a valid signer:

$ docker trust inspect --pretty example/trust-demo
+
+No signatures for example/trust-demo
+
+
+List of signers and their keys:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: 642692c14c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Parent command

Command Description
docker trust signer Manage entities who can sign Docker images
Command Description
docker trust signer add Add a signer
docker trust signer remove Remove a signer
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_signer_add/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_remove%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_remove%2Findex.html new file mode 100644 index 00000000..79cda963 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Ftrust_signer_remove%2Findex.html @@ -0,0 +1,114 @@ +

docker trust signer remove


Remove a signer

Usage

$ docker trust signer remove [OPTIONS] NAME REPOSITORY [REPOSITORY...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

docker trust signer remove removes signers from signed repositories.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--force , -f Do not prompt for confirmation before removing the most recent signer

Examples

Remove a signer from a repo

To remove an existing signer, alice, from this repository:

$ docker trust inspect --pretty example/trust-demo
+
+No signatures for example/trust-demo
+
+
+List of signers and their keys:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Remove alice with docker trust signer remove:

$ docker trust signer remove alice example/trust-demo
+
+Removing signer "alice" from image example/trust-demo...
+Enter passphrase for repository key with ID 642692c:
+Successfully removed alice from example/trust-demo
+

docker trust inspect --pretty now does not list alice as a valid signer:

$ docker trust inspect --pretty example/trust-demo
+
+No signatures for example/trust-demo
+
+
+List of signers and their keys:
+
+SIGNER              KEYS
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Remove a signer from multiple repos

To remove an existing signer, alice, from multiple repositories:

$ docker trust inspect --pretty example/trust-demo
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  74d4bfa917d55d53c7df3d2ab20a8d926874d61c3da5ef6de15dd2654fc467c4   alice, bob
+
+List of signers and their keys:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: 95b9e5514c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+
$ docker trust inspect --pretty example/trust-demo2
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  74d4bfa917d55d53c7df3d2ab20a8d926874d61c3da5ef6de15dd2654fc467c4   alice, bob
+
+List of signers and their keys:
+
+SIGNER              KEYS
+alice               05e87edcaecb
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo2:
+Repository Key: ece554f14c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4553d2ab20a8d9268
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

Remove alice from both images with a single docker trust signer remove command:

$ docker trust signer remove alice example/trust-demo example/trust-demo2
+
+Removing signer "alice" from image example/trust-demo...
+Enter passphrase for repository key with ID 95b9e55:
+Successfully removed alice from example/trust-demo
+
+Removing signer "alice" from image example/trust-demo2...
+Enter passphrase for repository key with ID ece554f:
+Successfully removed alice from example/trust-demo2
+

Run docker trust inspect --pretty to confirm that alice is no longer listed as a valid signer of either example/trust-demo or example/trust-demo2:

$ docker trust inspect --pretty example/trust-demo
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  74d4bfa917d55d53c7df3d2ab20a8d926874d61c3da5ef6de15dd2654fc467c4   bob
+
+List of signers and their keys:
+
+SIGNER              KEYS
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo:
+Repository Key: ecc457614c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4555b3c6ab02f71e
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+
$ docker trust inspect --pretty example/trust-demo2
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+v1                  74d4bfa917d55d53c7df3d2ab20a8d926874d61c3da5ef6de15dd2654fc467c4   bob
+
+List of signers and their keys:
+
+SIGNER              KEYS
+bob                 5600f5ab76a2
+
+Administrative keys for example/trust-demo2:
+Repository Key: ece554f14c9fc399da523a5f4e24fe306a0a6ee1cc79a10e4553d2ab20a8d9268
+Root Key:       3cb2228f6561e58f46dbc4cda4fcaff9d5ef22e865a94636f82450d1d2234949
+

docker trust signer remove removes signers from repositories on a best-effort basis: it continues removing the signer from subsequent repositories even if one attempt fails:

$ docker trust signer remove alice example/unauthorized example/authorized
+
+Removing signer "alice" from image example/unauthorized...
+No signer alice for image example/unauthorized
+
+Removing signer "alice" from image example/authorized...
+Enter passphrase for repository key with ID c6772a0:
+Successfully removed alice from example/authorized
+
+Error removing signer from: example/unauthorized
+
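
When scripting, you can pass --force (or -f) to skip the confirmation prompt that normally appears when removing the most recent signer. A minimal sketch, reusing the repository from the examples above:

$ docker trust signer remove -f alice example/trust-demo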

Parent command

Command Description
docker trust signer Manage entities who can sign Docker images
Command Description
docker trust signer add Add a signer
docker trust signer remove Remove a signer
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/trust_signer_remove/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Funpause%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Funpause%2Findex.html new file mode 100644 index 00000000..74ac93cc --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Funpause%2Findex.html @@ -0,0 +1,9 @@ +

docker unpause


Unpause all processes within one or more containers

Usage

$ docker unpause CONTAINER [CONTAINER...]
+

Description

The docker unpause command un-suspends all processes in the specified containers. On Linux, it does this using the freezer cgroup.

See the freezer cgroup documentation for further details.

For example uses of this command, refer to the examples section below.

Examples

$ docker unpause my_container
+my_container
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/unpause/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fupdate%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fupdate%2Findex.html new file mode 100644 index 00000000..b4cc7bec --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fupdate%2Findex.html @@ -0,0 +1,18 @@ +

docker update


Update configuration of one or more containers

Usage

$ docker update [OPTIONS] CONTAINER [CONTAINER...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

The docker update command dynamically updates container configuration. You can use this command to prevent containers from consuming too many resources from their Docker host. With a single command, you can place limits on a single container or on many. To specify more than one container, provide a space-separated list of container names or IDs.

With the exception of the --kernel-memory option, you can specify these options on a running or a stopped container. On kernel versions older than 4.6, you can only update --kernel-memory on a stopped container, or on a running container with kernel memory initialized.

Warning

The docker update and docker container update commands are not supported for Windows containers.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--blkio-weight Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)
--cpu-period Limit CPU CFS (Completely Fair Scheduler) period
--cpu-quota Limit CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period Limit the CPU real-time period in microseconds
--cpu-rt-runtime Limit the CPU real-time runtime in microseconds
+--cpu-shares , -c + CPU shares (relative weight)
--cpus Number of CPUs
--cpuset-cpus CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems MEMs in which to allow execution (0-3, 0,1)
--kernel-memory Kernel memory limit
+--memory , -m + Memory limit
--memory-reservation Memory soft limit
--memory-swap Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--pids-limit +API 1.40+
Tune container pids limit (set -1 for unlimited)
--restart Restart policy to apply when a container exits

Examples

The following sections illustrate ways to use this command.

Update a container’s cpu-shares

To limit a container’s cpu-shares to 512, first identify the container name or ID. You can use docker ps to find these values. You can also use the ID returned from the docker run command. Then, do the following:

$ docker update --cpu-shares 512 abebf7571666
+

Update a container with cpu-shares and memory

To update multiple resource configurations for multiple containers:

$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse
+

Update a container’s kernel memory constraints

You can update a container’s kernel memory limit using the --kernel-memory option. On kernel versions older than 4.6, this option can be updated on a running container only if the container was started with --kernel-memory. If the container was started without --kernel-memory, you need to stop the container before updating its kernel memory.

Note

The --kernel-memory option has been deprecated since Docker 20.10.

For example, if you started a container with this command:

$ docker run -dit --name test --kernel-memory 50M ubuntu bash
+

You can update kernel memory while the container is running:

$ docker update --kernel-memory 80M test
+

If you started a container without kernel memory initialized:

$ docker run -dit --name test2 --memory 300M ubuntu bash
+

Updating the kernel memory of the running container test2 will fail. You need to stop the container before updating the --kernel-memory setting. The next time you start it, the container uses the new value.

Kernel versions 4.6 and newer do not have this limitation; you can update --kernel-memory the same way as other options.
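
On kernels older than 4.6, a minimal sketch of the stop-update-start sequence for test2 (the 80M value is illustrative):

$ docker stop test2
$ docker update --kernel-memory 80M test2
$ docker start test2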

Update a container’s restart policy

You can change a container’s restart policy on a running container. The new restart policy takes effect instantly after you run docker update on a container.

To update restart policy for one or more containers:

$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse
+

Note that if the container was started with the --rm flag, you cannot update its restart policy; AutoRemove and RestartPolicy are mutually exclusive for a container.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/update/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fversion%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fversion%2Findex.html new file mode 100644 index 00000000..e5b59c7f --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fversion%2Findex.html @@ -0,0 +1,64 @@ +

docker version


Show the Docker version information

Usage

$ docker version [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

By default, this renders all version information in an easy-to-read layout. If a format is specified, the given template is executed instead.

Go’s text/template package describes all the details of the format.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--format , -f + Format the output using the given Go template
--kubeconfig +deprecatedKubernetes
Kubernetes config file

Examples

Default output

$ docker version
+
+Client:
+ Version:           19.03.8
+ API version:       1.40
+ Go version:        go1.12.17
+ Git commit:        afacb8b
+ Built:             Wed Mar 11 01:21:11 2020
+ OS/Arch:           darwin/amd64
+ Context:           default
+ Experimental:      true
+
+Server:
+ Engine:
+  Version:          19.03.8
+  API version:      1.40 (minimum version 1.12)
+  Go version:       go1.12.17
+  Git commit:       afacb8b
+  Built:            Wed Mar 11 01:29:16 2020
+  OS/Arch:          linux/amd64
+  Experimental:     true
+ containerd:
+  Version:          v1.2.13
+  GitCommit:        7ad184331fa3e55e52b890ea95e65ba581ae3429
+ runc:
+  Version:          1.0.0-rc10
+  GitCommit:        dc9208a3303feef5b3839f4323d9beb36df0a9dd
+ docker-init:
+  Version:          0.18.0
+  GitCommit:        fec3683
+

Get the server version

$ docker version --format '{{.Server.Version}}'
+
+19.03.8
+

Dump raw JSON data

$ docker version --format '{{json .}}'
+
+{"Client":{"Platform":{"Name":"Docker Engine - Community"},"Version":"19.03.8","ApiVersion":"1.40","DefaultAPIVersion":"1.40","GitCommit":"afacb8b","GoVersion":"go1.12.17","Os":"darwin","Arch":"amd64","BuildTime":"Wed Mar 11 01:21:11 2020","Experimental":true},"Server":{"Platform":{"Name":"Docker Engine - Community"},"Components":[{"Name":"Engine","Version":"19.03.8","Details":{"ApiVersion":"1.40","Arch":"amd64","BuildTime":"Wed Mar 11 01:29:16 2020","Experimental":"true","GitCommit":"afacb8b","GoVersion":"go1.12.17","KernelVersion":"4.19.76-linuxkit","MinAPIVersion":"1.12","Os":"linux"}},{"Name":"containerd","Version":"v1.2.13","Details":{"GitCommit":"7ad184331fa3e55e52b890ea95e65ba581ae3429"}},{"Name":"runc","Version":"1.0.0-rc10","Details":{"GitCommit":"dc9208a3303feef5b3839f4323d9beb36df0a9dd"}},{"Name":"docker-init","Version":"0.18.0","Details":{"GitCommit":"fec3683"}}],"Version":"19.03.8","ApiVersion":"1.40","MinAPIVersion":"1.12","GitCommit":"afacb8b","GoVersion":"go1.12.17","Os":"linux","Arch":"amd64","KernelVersion":"4.19.76-linuxkit","Experimental":true,"BuildTime":"2020-03-11T01:29:16.000000000+00:00"}}
+

The following example prints the currently used docker context:

$ docker version --format='{{.Client.Context}}'
+default
+

As an example, this output can be used to dynamically change your shell prompt to indicate your active context. The example below illustrates how this output could be used when using Bash as your shell.

Declare a function to obtain the current context in your ~/.bashrc, and set this function as your PROMPT_COMMAND:

function docker_context_prompt() {
+        PS1="context: $(docker version --format='{{.Client.Context}}')> "
+}
+
+PROMPT_COMMAND=docker_context_prompt
+

After reloading the ~/.bashrc, the prompt now shows the currently selected docker context:

$ source ~/.bashrc
+context: default> docker context create --docker host=unix:///var/run/docker.sock my-context
+my-context
+Successfully created context "my-context"
+context: default> docker context use my-context
+my-context
+Current context is now "my-context"
+context: my-context> docker context use default
+default
+Current context is now "default"
+context: default>
+

Refer to the docker context section in the command line reference for more information about docker context.

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/version/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume%2Findex.html new file mode 100644 index 00000000..cd4b9e7e --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume%2Findex.html @@ -0,0 +1,7 @@ +

docker volume


Manage volumes

Usage

$ docker volume COMMAND
+

Description

Manage volumes. You can use subcommands to create, inspect, list, remove, or prune volumes.

Child commands

Command Description
docker volume create Create a volume
docker volume inspect Display detailed information on one or more volumes
docker volume ls List volumes
docker volume prune Remove all unused local volumes
docker volume rm Remove one or more volumes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/volume/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_create%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_create%2Findex.html new file mode 100644 index 00000000..6e378e60 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_create%2Findex.html @@ -0,0 +1,35 @@ +

docker volume create


Create a volume

Usage

$ docker volume create [OPTIONS] [VOLUME]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Creates a new volume that containers can consume and store data in. If a name is not specified, Docker generates a random name.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--driver , -d + local Specify volume driver name
--label Set metadata for a volume
--name Specify volume name
+--opt , -o + Set driver specific options

Examples

Create a volume and then configure the container to use it:

$ docker volume create hello
+
+hello
+
+$ docker run -d -v hello:/world busybox ls /world
+

The mount is created inside the container’s /world directory. Docker does not support relative paths for mount points inside the container.

Multiple containers can use the same volume at the same time. This is useful if two containers need access to shared data: for example, if one container writes the data and the other reads it.

Volume names must be unique among drivers. This means you cannot use the same volume name with two different drivers. If you attempt this, Docker returns an error:

A volume named  "hello"  already exists with the "some-other" driver. Choose a different volume name.
+

If you specify a volume name already in use on the current driver, Docker assumes you want to reuse the existing volume and does not return an error.
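
For example, creating the same named volume twice with the default driver simply returns the existing volume’s name (a quick sketch):

$ docker volume create hello
hello
$ docker volume create hello
hello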

Driver-specific options

Some volume drivers may take options to customize the volume creation. Use the -o or --opt flags to pass driver options:

$ docker volume create --driver fake \
+    --opt tardis=blue \
+    --opt timey=wimey \
+    foo
+

These options are passed directly to the volume driver. Options for different volume drivers may do different things (or nothing at all).

The built-in local driver on Windows does not support any options.

The built-in local driver on Linux accepts options similar to the Linux mount command. You can provide multiple options by passing the --opt flag multiple times. Some mount options (such as the o option) can take a comma-separated list of options. The complete list of available mount options can be found in the mount man page.

For example, the following creates a tmpfs volume called foo with a size of 100 megabytes and a uid of 1000.

$ docker volume create --driver local \
+    --opt type=tmpfs \
+    --opt device=tmpfs \
+    --opt o=size=100m,uid=1000 \
+    foo
+

Another example that uses btrfs:

$ docker volume create --driver local \
+    --opt type=btrfs \
+    --opt device=/dev/sda2 \
+    foo
+

Another example that uses nfs to mount the /path/to/dir in rw mode from 192.168.1.1:

$ docker volume create --driver local \
+    --opt type=nfs \
+    --opt o=addr=192.168.1.1,rw \
+    --opt device=:/path/to/dir \
+    foo
+

Parent command

Command Description
docker volume Manage volumes
Command Description
docker volume create Create a volume
docker volume inspect Display detailed information on one or more volumes
docker volume ls List volumes
docker volume prune Remove all unused local volumes
docker volume rm Remove one or more volumes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/volume_create/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_inspect%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_inspect%2Findex.html new file mode 100644 index 00000000..78871751 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_inspect%2Findex.html @@ -0,0 +1,27 @@ +

docker volume inspect


Display detailed information on one or more volumes

Usage

$ docker volume inspect [OPTIONS] VOLUME [VOLUME...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Returns information about a volume. By default, this command renders all results in a JSON array. You can specify an alternate format to execute a given template for each result. Go’s text/template package describes all the details of the format.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--format , -f + Format the output using the given Go template

Examples

$ docker volume create myvolume
+
+myvolume
+

Use the docker volume inspect command to inspect the configuration of the volume:

$ docker volume inspect myvolume
+

The output is in JSON format, for example:

[
+  {
+    "CreatedAt": "2020-04-19T11:00:21Z",
+    "Driver": "local",
+    "Labels": {},
+    "Mountpoint": "/var/lib/docker/volumes/8140a838303144125b4f54653b47ede0486282c623c3551fbc7f390cdc3e9cf5/_data",
+    "Name": "myvolume",
+    "Options": {},
+    "Scope": "local"
+  }
+]
+

Use the --format flag to format the output using a Go template, for example, to print the Mountpoint property:

$ docker volume inspect --format '{{ .Mountpoint }}' myvolume
+
+/var/lib/docker/volumes/myvolume/_data
+

Parent command

Command Description
docker volume Manage volumes
Command Description
docker volume create Create a volume
docker volume inspect Display detailed information on one or more volumes
docker volume ls List volumes
docker volume prune Remove all unused local volumes
docker volume rm Remove one or more volumes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/volume_inspect/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_ls%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_ls%2Findex.html new file mode 100644 index 00000000..d55482fa --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_ls%2Findex.html @@ -0,0 +1,63 @@ +

docker volume ls


List volumes

Usage

$ docker volume ls [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

List all the volumes known to Docker. You can filter using the -f or --filter flag. Refer to the filtering section for more information about available filter options.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--filter , -f + Provide filter values (e.g. 'dangling=true')
--format Pretty-print volumes using a Go template
+--quiet , -q + Only display volume names

Examples

Create a volume

$ docker volume create rosemary
+
+rosemary
+
+$ docker volume create tyler
+
+tyler
+
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+local               rosemary
+local               tyler
+

Filtering

The filtering flag (-f or --filter) format is “key=value”. If there is more than one filter, pass multiple flags (e.g., --filter "foo=bar" --filter "bif=baz").

The currently supported filters are:

dangling

The dangling filter matches on all volumes not referenced by any containers.

$ docker run -d  -v tyler:/tmpwork  busybox
+
+f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18
+$ docker volume ls -f dangling=true
+DRIVER              VOLUME NAME
+local               rosemary
+

driver

The driver filter matches volumes based on their driver.

The following example matches volumes that are created with the local driver:

$ docker volume ls -f driver=local
+
+DRIVER              VOLUME NAME
+local               rosemary
+local               tyler
+

label

The label filter matches volumes based on the presence of a label alone or a label and a value.

First, let’s create some volumes to illustrate this:

$ docker volume create the-doctor --label is-timelord=yes
+
+the-doctor
+$ docker volume create daleks --label is-timelord=no
+
+daleks
+

The following example filter matches volumes with the is-timelord label regardless of its value.

$ docker volume ls --filter label=is-timelord
+
+DRIVER              VOLUME NAME
+local               daleks
+local               the-doctor
+

As the above example demonstrates, volumes with both is-timelord=yes and is-timelord=no are returned.

Filtering on both the key and value of the label produces the expected result:

$ docker volume ls --filter label=is-timelord=yes
+
+DRIVER              VOLUME NAME
+local               the-doctor
+

Specifying multiple label filters produces an “and” search; all conditions must be met:

$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no
+
+DRIVER              VOLUME NAME
+

name

The name filter matches on all or part of a volume’s name.

The following filter matches all volumes with a name containing the rose string.

$ docker volume ls -f name=rose
+
+DRIVER              VOLUME NAME
+local               rosemary
+

Formatting

The formatting option (--format) pretty-prints volume output using a Go template.

Valid placeholders for the Go template are listed below:

Placeholder Description
.Name Volume name
.Driver Volume driver
.Scope Volume scope (local, global)
.Mountpoint The mount point of the volume on the host
.Labels All labels assigned to the volume
.Label Value of a specific label for this volume. For example {{.Label "project.version"}} +

When using the --format option, the volume ls command either outputs the data exactly as the template declares or, when using the table directive, includes column headers as well.
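
For instance, a table template adds column headers (a small sketch; the output assumes the rosemary and tyler volumes created earlier, and header names and spacing may vary slightly by Docker version):

$ docker volume ls --format "table {{.Name}}\t{{.Driver}}"
VOLUME NAME         DRIVER
rosemary            local
tyler               local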

The following example uses a template without headers and outputs the Name and Driver entries separated by a colon (:) for all volumes:

$ docker volume ls --format "{{.Name}}: {{.Driver}}"
+
+vol1: local
+vol2: local
+vol3: local
+

Parent command

Command Description
docker volume Manage volumes
Command Description
docker volume create Create a volume
docker volume inspect Display detailed information on one or more volumes
docker volume ls List volumes
docker volume prune Remove all unused local volumes
docker volume rm Remove one or more volumes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/volume_ls/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_prune%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_prune%2Findex.html new file mode 100644 index 00000000..50aa4326 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_prune%2Findex.html @@ -0,0 +1,18 @@ +

docker volume prune


Remove all unused local volumes

Usage

$ docker volume prune [OPTIONS]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Remove all unused local volumes. Unused local volumes are those which are not referenced by any containers.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
--filter Provide filter values (e.g. 'label=<label>')
+--force , -f + Do not prompt for confirmation

Examples

$ docker volume prune
+
+WARNING! This will remove all local volumes not used by at least one container.
+Are you sure you want to continue? [y/N] y
+Deleted Volumes:
+07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e
+my-named-vol
+
+Total reclaimed space: 36 B
+
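
The --filter flag can restrict which unused volumes are considered. For example, to prune only unused volumes carrying a given label (the label name is illustrative):

$ docker volume prune --filter label=temp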

Parent command

Command Description
docker volume Manage volumes
Command Description
docker volume create Create a volume
docker volume inspect Display detailed information on one or more volumes
docker volume ls List volumes
docker volume prune Remove all unused local volumes
docker volume rm Remove one or more volumes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/volume_prune/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_rm%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_rm%2Findex.html new file mode 100644 index 00000000..0533b13d --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fvolume_rm%2Findex.html @@ -0,0 +1,12 @@ +

docker volume rm


Remove one or more volumes

Usage

$ docker volume rm [OPTIONS] VOLUME [VOLUME...]
+

Refer to the options section for an overview of available OPTIONS for this command.

Description

Remove one or more volumes. You cannot remove a volume that is in use by a container.

For example uses of this command, refer to the examples section below.

Options

Name, shorthand Default Description
+--force , -f + Force the removal of one or more volumes

Examples

$ docker volume rm hello
+
+hello
+

Parent command

Command Description
docker volume Manage volumes
Command Description
docker volume create Create a volume
docker volume inspect Display detailed information on one or more volumes
docker volume ls List volumes
docker volume prune Remove all unused local volumes
docker volume rm Remove one or more volumes
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/volume_rm/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Fcommandline%2Fwait%2Findex.html b/devdocs/docker/engine%2Freference%2Fcommandline%2Fwait%2Findex.html new file mode 100644 index 00000000..c08d91dd --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Fcommandline%2Fwait%2Findex.html @@ -0,0 +1,13 @@ +

docker wait


Block until one or more containers stop, then print their exit codes

Usage

$ docker wait CONTAINER [CONTAINER...]
+

For example uses of this command, refer to the examples section below.

Examples

Start a container in the background.

$ docker run -dit --name=my_container ubuntu bash
+

Run docker wait, which should block until the container exits.

$ docker wait my_container
+

In another terminal, stop the first container. The docker wait command above returns the exit code.

$ docker stop my_container
+

This is the same docker wait command from above, but it now exits, returning 0.

$ docker wait my_container
+
+0
+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/reference/commandline/wait/ +

+
diff --git a/devdocs/docker/engine%2Freference%2Frun%2Findex.html b/devdocs/docker/engine%2Freference%2Frun%2Findex.html new file mode 100644 index 00000000..80bd9cc3 --- /dev/null +++ b/devdocs/docker/engine%2Freference%2Frun%2Findex.html @@ -0,0 +1,333 @@ +

Docker run reference

Docker runs processes in isolated containers. A container is a process which runs on a host. The host may be local or remote. When an operator executes docker run, the container process that runs is isolated in that it has its own file system, its own networking, and its own isolated process tree separate from the host.

This page details how to use the docker run command to define the container’s resources at runtime.

General form

The basic docker run command takes this form:

$ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
+

The docker run command must specify an IMAGE to derive the container from. An image developer can define image defaults related to:

detached or foreground running
container identification
network settings
runtime constraints on CPU and memory

With docker run [OPTIONS], an operator can add to or override the image defaults set by a developer. Additionally, operators can override nearly all the defaults set by the Docker runtime itself. The operator’s ability to override image and Docker runtime defaults is why run has more options than any other docker command.

To learn how to interpret the types of [OPTIONS], see Option types.

Note

Depending on your Docker system configuration, you may be required to preface the docker run command with sudo. To avoid having to use sudo with the docker command, your system administrator can create a Unix group called docker and add users to it. For more information about this configuration, refer to the Docker installation documentation for your operating system.

Operator exclusive options

Only the operator (the person executing docker run) can set the following options.

Detached vs foreground

When starting a Docker container, you must first decide if you want to run the container in the background in a “detached” mode or in the default foreground mode:

-d=false: Detached mode: Run container in the background, print new container id
+

Detached (-d)

To start a container in detached mode, you use -d=true or just -d option. By design, containers started in detached mode exit when the root process used to run the container exits, unless you also specify the --rm option. If you use -d with --rm, the container is removed when it exits or when the daemon exits, whichever happens first.

Do not pass a service x start command to a detached container. For example, this command attempts to start the nginx service.

$ docker run -d -p 80:80 my_image service nginx start
+

This succeeds in starting the nginx service inside the container. However, it fails the detached container paradigm: the root process (service nginx start) returns and the detached container stops, as designed. As a result, the nginx service is started but cannot be used. Instead, to start a process such as the nginx web server, do the following:

$ docker run -d -p 80:80 my_image nginx -g 'daemon off;'
+

To do input/output with a detached container use network connections or shared volumes. These are required because the container is no longer listening to the command line where docker run was run.
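
For example, a common pattern is to publish a port and mount a named volume so you can still interact with a detached container (a sketch; the port mapping and volume name are illustrative):

$ docker run -d -p 8080:80 -v webdata:/usr/share/nginx/html nginx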

To reattach to a detached container, use docker attach command.

Foreground

In foreground mode (the default when -d is not specified), docker run can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most command line executables expect) and pass along signals. All of that is configurable:

-a=[]           : Attach to `STDIN`, `STDOUT` and/or `STDERR`
+-t              : Allocate a pseudo-tty
+--sig-proxy=true: Proxy all received signals to the process (non-TTY mode only)
+-i              : Keep STDIN open even if not attached
+

If you do not specify -a, then Docker will attach to both stdout and stderr. You can specify to which of the three standard streams (STDIN, STDOUT, STDERR) you’d like to connect instead, as in:

$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
+

For interactive processes (like a shell), you must use -i -t together in order to allocate a tty for the container process. -i -t is often written -it as you’ll see in later examples. Specifying -t is forbidden when the client is receiving its standard input from a pipe, as in:

$ echo test | docker run -i busybox cat
+

Note

A process running as PID 1 inside a container is treated specially by Linux: it ignores any signal with the default action. As a result, the process will not terminate on SIGINT or SIGTERM unless it is coded to do so.

Container identification

Name (--name)

The operator can identify a container in three ways:

Identifier type Example value
UUID long identifier “f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”
UUID short identifier “f78375b1c487”
Name “evil_ptolemy”

The UUID identifiers come from the Docker daemon. If you do not assign a container name with the --name option, then the daemon generates a random string name for you. Defining a name can be a handy way to add meaning to a container. If you specify a name, you can use it when referencing the container within a Docker network. This works for both background and foreground Docker containers.
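
For example, a named container can later be stopped or inspected by that name (a minimal sketch; the name and image are illustrative):

$ docker run -d --name web nginx:alpine
$ docker stop web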

Note

Containers on the default bridge network must be linked to communicate by name.

PID equivalent

Finally, to help with automation, you can have Docker write the container ID out to a file of your choosing. This is similar to how some programs might write out their process ID to a file (you’ve seen them as PID files):

--cidfile="": Write the container ID to the file
+
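
For instance, you might capture the ID for later use in a script (a sketch; the file path and image are illustrative):

$ docker run -d --cidfile /tmp/web.cid nginx
$ cat /tmp/web.cid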

Image[:tag]

While not strictly a means of identifying a container, you can specify a version of an image you’d like to run the container with by adding image[:tag] to the command. For example, docker run ubuntu:14.04.

Image[@digest]

Images using the v2 or later image format have a content-addressable identifier called a digest. As long as the input used to generate the image is unchanged, the digest value is predictable and referenceable.

The following example runs a container from the alpine image with the sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 digest:

$ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date
+

PID settings (--pid)

--pid=""  : Set the PID (Process) Namespace mode for the container,
+             'container:<name|id>': joins another container's PID namespace
+             'host': use the host's PID namespace inside the container
+

By default, all containers have the PID namespace enabled.

The PID namespace provides separation of processes: it removes the view of the system’s processes and allows process IDs to be reused, including PID 1.

In certain cases you want your container to share the host’s process namespace, basically allowing processes within the container to see all of the processes on the system. For example, you could build a container with debugging tools like strace or gdb, but want to use these tools when debugging processes within the container.

Example: run htop inside a container

Create this Dockerfile:

FROM alpine:latest
+RUN apk add --update htop && rm -rf /var/cache/apk/*
+CMD ["htop"]
+

Build the Dockerfile and tag the image as myhtop:

$ docker build -t myhtop .
+

Use the following command to run htop inside a container:

$ docker run -it --rm --pid=host myhtop
+

Joining another container’s pid namespace can be used for debugging that container.

Example

Start a container running a redis server:

$ docker run --name my-redis -d redis
+

Debug the redis container by running another container that has strace in it:

$ docker run -it --pid=container:my-redis my_strace_docker_image bash
+$ strace -p 1
+

UTS settings (--uts)

--uts=""  : Set the UTS namespace mode for the container,
+       'host': use the host's UTS namespace inside the container
+

The UTS namespace is for setting the hostname and the domain that is visible to running processes in that namespace. By default, all containers, including those with --network=host, have their own UTS namespace. The host setting will result in the container using the same UTS namespace as the host. Note that --hostname and --domainname are invalid in host UTS mode.

You may wish to share the UTS namespace with the host if you would like the hostname of the container to change as the hostname of the host changes. A more advanced use case would be changing the host’s hostname from a container.
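
For example, running with the host’s UTS namespace makes the container report the host’s hostname (a minimal sketch; alpine is used only for brevity):

$ docker run --rm --uts=host alpine hostname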

IPC settings (--ipc)

--ipc="MODE"  : Set the IPC mode for the container
+

The following values are accepted:

Value Description
”” Use daemon’s default.
“none” Own private IPC namespace, with /dev/shm not mounted.
“private” Own private IPC namespace.
“shareable” Own private IPC namespace, with a possibility to share it with other containers.
“container:<name-or-ID>” Join another (“shareable”) container’s IPC namespace.
“host” Use the host system’s IPC namespace.

If not specified, daemon default is used, which can either be "private" or "shareable", depending on the daemon version and configuration.

IPC (POSIX/SysV IPC) namespace provides separation of named shared memory segments, semaphores and message queues.

Shared memory segments are used to accelerate inter-process communication at memory speed, rather than through pipes or through the network stack. Shared memory is commonly used by databases and custom-built (typically C/OpenMPI, C++/using boost libraries) high performance applications for scientific computing and financial services industries. If these types of applications are broken into multiple containers, you might need to share the IPC mechanisms of the containers, using "shareable" mode for the main (i.e. “donor”) container, and "container:<donor-name-or-ID>" for other containers.
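
A minimal sketch of that donor pattern (container and image names are illustrative):

$ docker run -d --name donor --ipc=shareable my_app_image
$ docker run -d --name worker --ipc=container:donor my_app_image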

Network settings

--dns=[]           : Set custom dns servers for the container
+--network="bridge" : Connect a container to a network
+                      'bridge': create a network stack on the default Docker bridge
+                      'none': no networking
+                      'container:<name|id>': reuse another container's network stack
+                      'host': use the Docker host network stack
+                      '<network-name>|<network-id>': connect to a user-defined network
+--network-alias=[] : Add network-scoped alias for the container
+--add-host=""      : Add a line to /etc/hosts (host:IP)
+--mac-address=""   : Sets the container's Ethernet device's MAC address
+--ip=""            : Sets the container's Ethernet device's IPv4 address
+--ip6=""           : Sets the container's Ethernet device's IPv6 address
+--link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses
+

By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking with docker run --network none which disables all incoming and outgoing networking. In cases like this, you would perform I/O through files or STDIN and STDOUT only.

Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking.

Your container will use the same DNS servers as the host by default, but you can override this with --dns.
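
For example, to point a single container at a specific resolver (the address is illustrative):

$ docker run --rm --dns 8.8.8.8 busybox cat /etc/resolv.conf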

By default, the MAC address is generated using the IP address allocated to the container. You can set the container’s MAC address explicitly by providing a MAC address via the --mac-address parameter (format: 12:34:56:78:9a:bc). Be aware that Docker does not check if manually specified MAC addresses are unique.

Supported networks:

Network Description
none No networking in the container.
+bridge (default) Connect the container to the bridge via veth interfaces.
host Use the host's network stack inside the container.
+container:<name|id> Use the network stack of another container, specified via its name or id.
NETWORK Connects the container to a user-created network (using the docker network create command)

Network: none

With the network set to none, a container will not have access to any external routes. The container will still have a loopback interface enabled, but it does not have any routes to external traffic.

Network: bridge

With the network set to bridge, a container uses Docker’s default networking setup. A bridge is set up on the host, commonly named docker0, and a pair of veth interfaces is created for the container. One side of the veth pair remains on the host attached to the bridge, while the other side of the pair is placed inside the container’s namespaces in addition to the loopback interface. An IP address is allocated for containers on the bridge’s network and traffic is routed through this bridge to the container.

Containers can communicate via their IP addresses by default. To communicate by name, they must be linked.

Network: host

With the network set to host, a container shares the host’s network stack, and all interfaces from the host are available to the container. The container’s hostname will match the hostname on the host system. Note that --mac-address is invalid in host netmode. Even in host network mode a container has its own UTS namespace by default, so --hostname and --domainname are allowed in host network mode and will only change the hostname and domain name inside the container. Similar to --hostname, the --add-host, --dns, --dns-search, and --dns-option options can be used in host network mode. These options update /etc/hosts or /etc/resolv.conf inside the container. No changes are made to /etc/hosts or /etc/resolv.conf on the host.

Compared to the default bridge mode, the host mode gives significantly better networking performance since it uses the host’s native networking stack whereas the bridge has to go through one level of virtualization through the docker daemon. It is recommended to run containers in this mode when their networking performance is critical, for example, a production Load Balancer or a High Performance Web Server.

Note

--network="host" gives the container full access to local system services such as D-bus and is therefore considered insecure.

Network: container

With the network set to container, a container shares the network stack of another container. The other container’s name must be provided in the format of --network container:<name|id>. Note that --add-host, --hostname, --dns, --dns-search, --dns-option, and --mac-address are invalid in container netmode, as are --publish, --publish-all, and --expose.

Example: run a Redis container with Redis binding to localhost, then run the redis-cli command and connect to the Redis server over the localhost interface.

$ docker run -d --name redis example/redis --bind 127.0.0.1
+$ # use the redis container's network stack to access localhost
+$ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1
+

User-defined network

You can create a network using a Docker network driver or an external network driver plugin. You can connect multiple containers to the same network. Once connected to a user-defined network, the containers can communicate easily using only another container’s IP address or name.

For overlay networks or custom plugins that support multi-host connectivity, containers connected to the same multi-host network but launched from different Engines can also communicate in this way.

The following example creates a network using the built-in bridge network driver and runs a container in the created network:

$ docker network create -d bridge my-net
+$ docker run --network=my-net -itd --name=container3 busybox
+
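
A second container attached to the same network can then reach the first by name (a short sketch building on the commands above):

$ docker run --network=my-net --rm busybox ping -c 1 container3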

Managing /etc/hosts

Your container will have lines in /etc/hosts which define the hostname of the container itself as well as localhost and a few other common things. The --add-host flag can be used to add additional lines to /etc/hosts.

$ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts
+
+172.17.0.22     09d03f76bf2c
+fe00::0         ip6-localnet
+ff00::0         ip6-mcastprefix
+ff02::1         ip6-allnodes
+ff02::2         ip6-allrouters
+127.0.0.1       localhost
+::1	            localhost ip6-localhost ip6-loopback
+86.75.30.9      db-static
+

If a container is connected to the default bridge network and linked with other containers, then the container’s /etc/hosts file is updated with the linked container’s name.

Note

Since Docker may live update the container’s /etc/hosts file, there may be situations when processes inside the container can end up reading an empty or incomplete /etc/hosts file. In most cases, retrying the read again should fix the problem.

Restart policies (--restart)

Using the --restart flag on Docker run you can specify a restart policy for how a container should or should not be restarted on exit.

When a restart policy is active on a container, it will be shown as either Up or Restarting in docker ps. It can also be useful to use docker events to see the restart policy in effect.

Docker supports the following restart policies:

Policy Result
no Do not automatically restart the container when it exits. This is the default.
on-failure[:max-retries] Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts.
always Always restart the container regardless of the exit status. When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container.
unless-stopped Always restart the container regardless of the exit status, including on daemon startup, except if the container was put into a stopped state before the Docker daemon was stopped.

An increasing delay (double the previous delay, starting at 100 milliseconds) is added before each restart to prevent flooding the server. This means the daemon waits for 100 ms, then 200 ms, 400 ms, 800 ms, 1600 ms, and so on, until either the on-failure limit is reached, the maximum delay of 1 minute is hit, or you docker stop or docker rm -f the container.

If a container is successfully restarted (the container is started and runs for at least 10 seconds), the delay is reset to its default value of 100 ms.

You can specify the maximum number of times Docker will try to restart the container when using the on-failure policy. The default is that Docker will try forever to restart the container. The number of (attempted) restarts for a container can be obtained via docker inspect. For example, to get the number of restarts for the container “my-container”:

+$ docker inspect -f "{{ .RestartCount }}" my-container
+# 2
+
+

Or, to get the last time the container was (re)started:

+$ docker inspect -f "{{ .State.StartedAt }}" my-container
+# 2015-03-04T23:47:07.691840179Z
+
+

Combining --restart (restart policy) with the --rm (clean up) flag results in an error. On container restart, attached clients are disconnected. See the examples on using the --rm (clean up) flag later in this page.

Examples

$ docker run --restart=always redis
+

This will run the redis container with a restart policy of always so that if the container exits, Docker will restart it.

$ docker run --restart=on-failure:10 redis
+

This will run the redis container with a restart policy of on-failure and a maximum restart count of 10. If the redis container exits with a non-zero exit status more than 10 times in a row Docker will abort trying to restart the container. Providing a maximum restart limit is only valid for the on-failure policy.

Exit Status

The exit code from docker run gives information about why the container failed to run or why it exited. When docker run exits with a non-zero code, the exit codes follow the chroot standard, see below:

125 if the error is with Docker daemon itself

$ docker run --foo busybox; echo $?
+
+flag provided but not defined: --foo
+See 'docker run --help'.
+125
+

126 if the contained command cannot be invoked

$ docker run busybox /etc; echo $?
+
+docker: Error response from daemon: Container command '/etc' could not be invoked.
+126
+

127 if the contained command cannot be found

$ docker run busybox foo; echo $?
+
+docker: Error response from daemon: Container command 'foo' not found or does not exist.
+127
+

Exit code of contained command otherwise

$ docker run busybox /bin/sh -c 'exit 3'
+$ echo $?
+3
+

Clean up (--rm)

By default a container’s file system persists even after the container exits. This makes debugging a lot easier (since you can inspect the final state) and you retain all your data by default. But if you are running short-term foreground processes, these container file systems can really pile up. If instead you’d like Docker to automatically clean up the container and remove the file system when the container exits, you can add the --rm flag:

--rm=false: Automatically remove the container when it exits
+

Note

If you set the --rm flag, Docker also removes the anonymous volumes associated with the container when the container is removed. This is similar to running docker rm -v my-container. Only volumes that are specified without a name are removed. For example, when running:

$ docker run --rm -v /foo -v awesome:/bar busybox top
+

the volume for /foo will be removed, but the volume for /bar will not. Volumes inherited via --volumes-from will be removed with the same logic: if the original volume was specified with a name it will not be removed.

Security configuration

Option Description
--security-opt="label=user:USER" Set the label user for the container
--security-opt="label=role:ROLE" Set the label role for the container
--security-opt="label=type:TYPE" Set the label type for the container
--security-opt="label=level:LEVEL" Set the label level for the container
--security-opt="label=disable" Turn off label confinement for the container
--security-opt="apparmor=PROFILE" Set the apparmor profile to be applied to the container
--security-opt="no-new-privileges:true" Disable container processes from gaining new privileges
--security-opt="seccomp=unconfined" Turn off seccomp confinement for the container
--security-opt="seccomp=profile.json" White-listed syscalls seccomp Json file to be used as a seccomp filter

You can override the default labeling scheme for each container by specifying the --security-opt flag. Specifying the level in the following command allows you to share the same content between containers.

$ docker run --security-opt label=level:s0:c100,c200 -it fedora bash
+

Note

Automatic translation of MLS labels is not currently supported.

To disable the security labeling for this container versus running with the --privileged flag, use the following command:

$ docker run --security-opt label=disable -it fedora bash
+

If you want a tighter security policy on the processes within a container, you can specify an alternate type for the container. You could run a container that is only allowed to listen on Apache ports by executing the following command:

$ docker run --security-opt label=type:svirt_apache_t -it centos bash
+

Note

You would have to write policy defining a svirt_apache_t type.

If you want to prevent your container processes from gaining additional privileges, you can execute the following command:

$ docker run --security-opt no-new-privileges -it centos bash
+

This means that commands that raise privileges such as su or sudo will no longer work. It also causes any seccomp filters to be applied later, after privileges have been dropped which may mean you can have a more restrictive set of filters. For more details, see the kernel documentation.

Specify an init process

You can use the --init flag to indicate that an init process should be used as the PID 1 in the container. Specifying an init process ensures the usual responsibilities of an init system, such as reaping zombie processes, are performed inside the created container.

The default init process used is the first docker-init executable found in the system path of the Docker daemon process. This docker-init binary, included in the default installation, is backed by tini.
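
For example (a minimal sketch; the image is illustrative):

$ docker run --rm --init -it alpine ps

With --init, PID 1 inside the container is the init process rather than your command, which runs as its child.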

Specify custom cgroups

Using the --cgroup-parent flag, you can pass a specific cgroup to run a container in. This allows you to create and manage cgroups on their own. You can define custom resources for those cgroups and put containers under a common parent group.
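
For example (the cgroup name is illustrative):

$ docker run -d --cgroup-parent=my-parent-cgroup nginx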

Runtime constraints on resources

The operator can also adjust the performance parameters of the container:

Option Description
+-m, --memory="" + Memory limit (format: <number>[<unit>]). Number is a positive integer. Unit can be one of b, k, m, or g. Minimum is 4M.
--memory-swap="" Total memory limit (memory + swap, format: <number>[<unit>]). Number is a positive integer. Unit can be one of b, k, m, or g.
--memory-reservation="" Memory soft limit (format: <number>[<unit>]). Number is a positive integer. Unit can be one of b, k, m, or g.
--kernel-memory="" Kernel memory limit (format: <number>[<unit>]). Number is a positive integer. Unit can be one of b, k, m, or g. Minimum is 4M.
+-c, --cpu-shares=0 + CPU shares (relative weight)
--cpus=0.000 Number of CPUs. Number is a fractional number. 0.000 means no limit.
--cpu-period=0 Limit the CPU CFS (Completely Fair Scheduler) period
--cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
--cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota
--cpu-rt-period=0 Limit the CPU real-time period. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits.
--cpu-rt-runtime=0 Limit the CPU real-time runtime. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits.
--blkio-weight=0 Block IO weight (relative weight) accepts a weight value between 10 and 1000.
--blkio-weight-device="" Block IO weight (relative device weight, format: DEVICE_NAME:WEIGHT)
--device-read-bps="" Limit read rate from a device (format: <device-path>:<number>[<unit>]). Number is a positive integer. Unit can be one of kb, mb, or gb.
--device-write-bps="" Limit write rate to a device (format: <device-path>:<number>[<unit>]). Number is a positive integer. Unit can be one of kb, mb, or gb.
--device-read-iops="" Limit read rate (IO per second) from a device (format: <device-path>:<number>). Number is a positive integer.
--device-write-iops="" Limit write rate (IO per second) to a device (format: <device-path>:<number>). Number is a positive integer.
--oom-kill-disable=false Whether to disable OOM Killer for the container or not.
--oom-score-adj=0 Tune container’s OOM preferences (-1000 to 1000)
--memory-swappiness="" Tune a container’s memory swappiness behavior. Accepts an integer between 0 and 100.
--shm-size="" Size of /dev/shm. The format is <number><unit>. number must be greater than 0. Unit is optional and can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses 64m.

User memory constraints

We have four ways to set user memory usage:

Option Result
memory=inf, memory-swap=inf (default) There is no memory limit for the container. The container can use as much memory as needed.
memory=L<inf, memory-swap=inf (specify memory and set memory-swap as -1) The container is not allowed to use more than L bytes of memory, but can use as much swap as is needed (if the host supports swap memory).
memory=L<inf, memory-swap=2*L (specify memory without memory-swap) The container is not allowed to use more than L bytes of memory; the combined memory plus swap usage is limited to double that amount.
memory=L<inf, memory-swap=S<inf, L<=S (specify both memory and memory-swap) The container is not allowed to use more than L bytes of memory, swap plus memory usage is limited by S.

Examples:

$ docker run -it ubuntu:14.04 /bin/bash
+

We set nothing about memory. This means the processes in the container can use as much memory and swap memory as they need.

$ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash
+

We set a memory limit and disabled the swap memory limit. This means the processes in the container can use 300M of memory and as much swap memory as they need (if the host supports swap memory).

$ docker run -it -m 300M ubuntu:14.04 /bin/bash
+

We set the memory limit only. This means the processes in the container can use 300M of memory and 300M of swap memory. By default, the total virtual memory size (--memory-swap) is set to double the memory limit; in this case, memory plus swap would be 2*300M, so the processes can use 300M of swap memory as well.

$ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash
+

We set both memory and swap memory, so the processes in the container can use 300M memory and 700M swap memory.

Memory reservation is a kind of memory soft limit that allows for greater sharing of memory. Under normal circumstances, containers can use as much of the memory as needed and are constrained only by the hard limits set with the -m/--memory option. When memory reservation is set, Docker detects memory contention or low memory and forces containers to restrict their consumption to a reservation limit.

Always set the memory reservation value below the hard limit, otherwise the hard limit takes precedence. A reservation of 0 is the same as setting no reservation. By default (without reservation set), memory reservation is the same as the hard memory limit.

Memory reservation is a soft-limit feature and does not guarantee the limit won’t be exceeded. Instead, the feature attempts to ensure that, when memory is heavily contended for, memory is allocated based on the reservation hints/setup.

The following example limits the memory (-m) to 500M and sets the memory reservation to 200M.

$ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash
+

Under this configuration, when the container consumes more than 200M but less than 500M of memory, the next system memory reclaim attempts to shrink container memory below 200M.

The following example sets the memory reservation to 1G without a hard memory limit.

$ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash
+

The container can use as much memory as it needs. The memory reservation setting ensures the container doesn’t consume too much memory for a long time, because every memory reclaim shrinks the container’s consumption to the reservation.

By default, the kernel kills processes in a container if an out-of-memory (OOM) error occurs. To change this behaviour, use the --oom-kill-disable option. Only disable the OOM killer on containers where you have also set the -m/--memory option. If the -m flag is not set, the host can run out of memory and the kernel may need to kill the host’s system processes to free memory.

The following example limits the memory to 100M and disables the OOM killer for this container:

$ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash
+

The following example illustrates a dangerous way to use the flag:

$ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash
+

The container has unlimited memory, which can cause the host to run out of memory and require killing system processes to free memory. The --oom-score-adj parameter can be changed to tune the priority with which containers are killed when the system is out of memory: negative scores make a container less likely to be killed, and positive scores more likely.

Kernel memory constraints

Kernel memory is fundamentally different from user memory, as kernel memory can’t be swapped out. The inability to swap makes it possible for the container to block system services by consuming too much kernel memory. Kernel memory includes resources such as stack pages and slab pages.

You can set up a kernel memory limit to constrain these kinds of memory. For example, every process consumes some stack pages. By limiting kernel memory, you can prevent new processes from being created when the kernel memory usage is too high.

Kernel memory is never completely independent of user memory. Instead, you limit kernel memory in the context of the user memory limit. Assume “U” is the user memory limit and “K” the kernel limit. There are three possible ways to set limits:

Option Result
U != 0, K = inf (default) This is the standard memory limitation mechanism already present before using kernel memory. Kernel memory is completely ignored.
U != 0, K < U Kernel memory is a subset of the user memory. This setup is useful in deployments where the total amount of memory per-cgroup is overcommitted. Overcommitting kernel memory limits is definitely not recommended, since the box can still run out of non-reclaimable memory. In this case, you can configure K so that the sum of all groups is never greater than the total memory. Then, freely set U at the expense of the system's service quality.
U != 0, K > U Kernel memory charges are also fed to the user counter, and reclamation is triggered for the container for both kinds of memory. This configuration gives the admin a unified view of memory. It is also useful for people who just want to track kernel memory usage.

Examples:

$ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash
+

We set both memory and kernel memory, so the processes in the container can use 500M of memory in total. Of this 500M, at most 50M can be kernel memory.

$ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash
+

We set kernel memory without -m, so the processes in the container can use as much memory as they want, but they can only use 50M kernel memory.

Swappiness constraint

By default, a container’s kernel can swap out a percentage of anonymous pages. To set this percentage for a container, specify a --memory-swappiness value between 0 and 100. A value of 0 turns off anonymous page swapping. A value of 100 sets all anonymous pages as swappable. If you do not use --memory-swappiness, the memory swappiness value is inherited from the parent.

For example, you can set:

$ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash
+

Setting the --memory-swappiness option is helpful when you want to retain the container’s working set and to avoid swapping performance penalties.

CPU share constraint

By default, all containers get the same proportion of CPU cycles. This proportion can be modified by changing the container’s CPU share weighting relative to the weighting of all other running containers.

To modify the proportion from the default of 1024, use the -c or --cpu-shares flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the value and use the default of 1024.
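For example, the following command (the weighting value and image are illustrative) starts a container with half the default CPU share weighting:

$ docker run -it -c 512 ubuntu:14.04 /bin/bash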

The proportion will only apply when CPU-intensive processes are running. When tasks in one container are idle, other containers can use the left-over CPU time. The actual amount of CPU time will vary depending on the number of containers running on the system.

For example, consider three containers, one has a cpu-share of 1024 and two others have a cpu-share setting of 512. When processes in all three containers attempt to use 100% of CPU, the first container would receive 50% of the total CPU time. If you add a fourth container with a cpu-share of 1024, the first container only gets 33% of the CPU. The remaining containers receive 16.5%, 16.5% and 33% of the CPU.

On a multi-core system, the shares of CPU time are distributed over all CPU cores. Even if a container is limited to less than 100% of CPU time, it can use 100% of each individual CPU core.

For example, consider a system with more than three cores. If you start one container {C0} with -c=512 running one process, and another container {C1} with -c=1024 running two processes, this can result in the following division of CPU shares:

PID    container	CPU	CPU share
+100    {C0}		0	100% of CPU0
+101    {C1}		1	100% of CPU1
+102    {C1}		2	100% of CPU2
+

CPU period constraint

The default CPU CFS (Completely Fair Scheduler) period is 100ms. Use --cpu-period to set the CPU period and limit the container’s CPU usage. --cpu-period is usually used together with --cpu-quota.

Examples:

$ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash
+

If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms.

In addition to using --cpu-period and --cpu-quota for setting CPU period constraints, you can specify --cpus with a float number to achieve the same purpose. For example, if there is 1 CPU, then --cpus=0.5 achieves the same result as setting --cpu-period=50000 and --cpu-quota=25000 (50% CPU).

The default value for --cpus is 0.000, which means there is no limit.
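For example, the following command (the image is arbitrary) limits a container to half of one CPU:

$ docker run -it --cpus=0.5 ubuntu:14.04 /bin/bash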

For more information, see the CFS documentation on bandwidth limiting.

Cpuset constraint

We can set the CPUs in which to allow execution for containers.

Examples:

$ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash
+

This means processes in the container can execute on CPU 1 and CPU 3.

$ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash
+

This means processes in the container can execute on CPU 0, CPU 1 and CPU 2.

We can also set the memory nodes (mems) in which to allow execution for containers. This is only effective on NUMA systems.

Examples:

$ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash
+

This example restricts the processes in the container to only use memory from memory nodes 1 and 3.

$ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash
+

This example restricts the processes in the container to only use memory from memory nodes 0, 1 and 2.

CPU quota constraint

The --cpu-quota flag limits the container’s CPU usage. The default value of 0 allows the container to use 100% of a CPU resource (1 CPU). The CFS (Completely Fair Scheduler) handles resource allocation for executing processes and is the default Linux scheduler used by the kernel. Set this value to 50000 to limit the container to 50% of a CPU resource. For multiple CPUs, adjust the --cpu-quota as necessary. For more information, see the CFS documentation on bandwidth limiting.
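For example, the following command (values and image are illustrative) limits a container to 50% of a CPU with the default 100ms period:

$ docker run -it --cpu-quota=50000 ubuntu:14.04 /bin/bash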

Block IO bandwidth (Blkio) constraint

By default, all containers get the same proportion of block IO bandwidth (blkio). This proportion is 500. To modify this proportion, change the container’s blkio weight relative to the weighting of all other running containers using the --blkio-weight flag.

Note:

The blkio weight setting is only available for direct IO. Buffered IO is not currently supported.

The --blkio-weight flag can set the weighting to a value between 10 and 1000. For example, the commands below create two containers with different blkio weights:

$ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash
+$ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash
+

If you perform block IO in the two containers at the same time, for example:

$ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct
+

You’ll find that the proportion of time is the same as the proportion of blkio weights of the two containers.

The --blkio-weight-device="DEVICE_NAME:WEIGHT" flag sets a specific device weight. The DEVICE_NAME:WEIGHT is a string containing a colon-separated device name and weight. For example, to set /dev/sda device weight to 200:

$ docker run -it \
+    --blkio-weight-device "/dev/sda:200" \
+    ubuntu
+

If you specify both the --blkio-weight and --blkio-weight-device, Docker uses the --blkio-weight as the default weight and uses --blkio-weight-device to override this default with a new value on a specific device. The following example uses a default weight of 300 and overrides this default on /dev/sda setting that weight to 200:

$ docker run -it \
+    --blkio-weight 300 \
+    --blkio-weight-device "/dev/sda:200" \
+    ubuntu
+

The --device-read-bps flag limits the read rate (bytes per second) from a device. For example, this command creates a container and limits the read rate to 1mb per second from /dev/sda:

$ docker run -it --device-read-bps /dev/sda:1mb ubuntu
+

The --device-write-bps flag limits the write rate (bytes per second) to a device. For example, this command creates a container and limits the write rate to 1mb per second for /dev/sda:

$ docker run -it --device-write-bps /dev/sda:1mb ubuntu
+

Both flags take limits in the <device-path>:<limit>[unit] format. Both read and write rates must be a positive integer. You can specify the rate in kb (kilobytes), mb (megabytes), or gb (gigabytes).

The --device-read-iops flag limits read rate (IO per second) from a device. For example, this command creates a container and limits the read rate to 1000 IO per second from /dev/sda:

$ docker run -ti --device-read-iops /dev/sda:1000 ubuntu
+

The --device-write-iops flag limits write rate (IO per second) to a device. For example, this command creates a container and limits the write rate to 1000 IO per second to /dev/sda:

$ docker run -ti --device-write-iops /dev/sda:1000 ubuntu
+

Both flags take limits in the <device-path>:<limit> format. Both read and write rates must be a positive integer.

Additional groups

--group-add: Add additional groups to run as
+

By default, the docker container process runs with the supplementary groups looked up for the specified user. If one wants to add more to that list of groups, then one can use this flag:

$ docker run --rm --group-add audio --group-add nogroup --group-add 777 busybox id
+
+uid=0(root) gid=0(root) groups=10(wheel),29(audio),99(nogroup),777
+

Runtime privilege and Linux capabilities

Option Description
--cap-add Add Linux capabilities
--cap-drop Drop Linux capabilities
--privileged Give extended privileges to this container
--device=[] Allows you to run devices inside the container without the --privileged flag.

By default, Docker containers are “unprivileged” and cannot, for example, run a Docker daemon inside a Docker container. This is because by default a container is not allowed to access any devices, but a “privileged” container is given access to all devices (see the documentation on cgroups devices).

The --privileged flag gives all capabilities to the container. When the operator executes docker run --privileged, Docker will enable access to all devices on the host as well as set some configuration in AppArmor or SELinux to allow the container nearly all the same access to the host as processes running outside containers on the host. Additional information about running with --privileged is available on the Docker Blog.

If you want to limit access to a specific device or devices you can use the --device flag. It allows you to specify one or more devices that will be accessible within the container.

$ docker run --device=/dev/snd:/dev/snd ...
+

By default, the container will be able to read, write, and mknod these devices. This can be overridden using a third :rwm set of options to each --device flag:

$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
+
+Command (m for help): q
+$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk  /dev/xvdc
+You will not be able to write the partition table.
+
+Command (m for help): q
+
+$ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk  /dev/xvdc
+    crash....
+
+$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk  /dev/xvdc
+fdisk: unable to open /dev/xvdc: Operation not permitted
+

In addition to --privileged, the operator can have fine-grained control over the capabilities using --cap-add and --cap-drop. Docker has a default list of capabilities that are kept. The following table lists the Linux capability options which are allowed by default and can be dropped.

Capability Key Capability Description
AUDIT_WRITE Write records to kernel auditing log.
CHOWN Make arbitrary changes to file UIDs and GIDs (see chown(2)).
DAC_OVERRIDE Bypass file read, write, and execute permission checks.
FOWNER Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file.
FSETID Don’t clear set-user-ID and set-group-ID permission bits when a file is modified.
KILL Bypass permission checks for sending signals.
MKNOD Create special files using mknod(2).
NET_BIND_SERVICE Bind a socket to internet domain privileged ports (port numbers less than 1024).
NET_RAW Use RAW and PACKET sockets.
SETFCAP Set file capabilities.
SETGID Make arbitrary manipulations of process GIDs and supplementary GID list.
SETPCAP Modify process capabilities.
SETUID Make arbitrary manipulations of process UIDs.
SYS_CHROOT Use chroot(2), change root directory.

The next table shows the capabilities which are not granted by default and may be added.

Capability Key Capability Description
AUDIT_CONTROL Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules.
AUDIT_READ Allow reading the audit log via multicast netlink socket.
BLOCK_SUSPEND Allow preventing system suspends.
BPF Allow creating BPF maps, loading BPF Type Format (BTF) data, retrieve JITed code of BPF programs, and more.
CHECKPOINT_RESTORE Allow checkpoint/restore related operations. Introduced in kernel 5.9.
DAC_READ_SEARCH Bypass file read permission checks and directory read and execute permission checks.
IPC_LOCK Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)).
IPC_OWNER Bypass permission checks for operations on System V IPC objects.
LEASE Establish leases on arbitrary files (see fcntl(2)).
LINUX_IMMUTABLE Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags.
MAC_ADMIN Allow MAC configuration or state changes. Implemented for the Smack LSM.
MAC_OVERRIDE Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM).
NET_ADMIN Perform various network-related operations.
NET_BROADCAST Make socket broadcasts, and listen to multicasts.
PERFMON Allow system performance and observability privileged operations using perf_events, i915_perf and other kernel subsystems
SYS_ADMIN Perform a range of system administration operations.
SYS_BOOT Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution.
SYS_MODULE Load and unload kernel modules.
SYS_NICE Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes.
SYS_PACCT Use acct(2), switch process accounting on or off.
SYS_PTRACE Trace arbitrary processes using ptrace(2).
SYS_RAWIO Perform I/O port operations (iopl(2) and ioperm(2)).
SYS_RESOURCE Override resource Limits.
SYS_TIME Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock.
SYS_TTY_CONFIG Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals.
SYSLOG Perform privileged syslog(2) operations.
WAKE_ALARM Trigger something that will wake up the system.

Further reference information is available on the capabilities(7) - Linux man page, and in the Linux kernel source code.

Both flags support the value ALL, so to allow a container to use all capabilities except for MKNOD:

$ docker run --cap-add=ALL --cap-drop=MKNOD ...
+

The --cap-add and --cap-drop flags accept capabilities to be specified with a CAP_ prefix. The following examples are therefore equivalent:

$ docker run --cap-add=SYS_ADMIN ...
+$ docker run --cap-add=CAP_SYS_ADMIN ...
+

For interacting with the network stack, use --cap-add=NET_ADMIN instead of --privileged to modify the network interfaces.

$ docker run -it --rm  ubuntu:14.04 ip link add dummy0 type dummy
+
+RTNETLINK answers: Operation not permitted
+
+$ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy
+

To mount a FUSE based filesystem, you need to combine both --cap-add and --device:

$ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt
+
+fuse: failed to open /dev/fuse: Operation not permitted
+
+$ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt
+
+fusermount: mount failed: Operation not permitted
+
+$ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs
+
+# sshfs sven@10.10.10.20:/home/sven /mnt
+The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established.
+ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6.
+Are you sure you want to continue connecting (yes/no)? yes
+sven@10.10.10.20's password:
+
+root@30aa0cfaf1b5:/# ls -la /mnt/src/docker
+
+total 1516
+drwxrwxr-x 1 1000 1000   4096 Dec  4 06:08 .
+drwxrwxr-x 1 1000 1000   4096 Dec  4 11:46 ..
+-rw-rw-r-- 1 1000 1000     16 Oct  8 00:09 .dockerignore
+-rwxrwxr-x 1 1000 1000    464 Oct  8 00:09 .drone.yml
+drwxrwxr-x 1 1000 1000   4096 Dec  4 06:11 .git
+-rw-rw-r-- 1 1000 1000    461 Dec  4 06:08 .gitignore
+....
+

The default seccomp profile will adjust to the selected capabilities, in order to allow use of facilities allowed by the capabilities, so you should not have to adjust this.

Logging drivers (--log-driver)

The container can have a different logging driver than the Docker daemon. Use the --log-driver=VALUE with the docker run command to configure the container’s logging driver. The following options are supported:

Driver Description
none Disables any logging for the container. docker logs won’t be available with this driver.
local Logs are stored in a custom format designed for minimal overhead.
json-file Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver.
syslog Syslog logging driver for Docker. Writes log messages to syslog.
journald Journald logging driver for Docker. Writes log messages to journald.
gelf Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash.
fluentd Fluentd logging driver for Docker. Writes log messages to fluentd (forward input).
awslogs Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs.
splunk Splunk logging driver for Docker. Writes log messages to splunk using Event Http Collector.
etwlogs Event Tracing for Windows (ETW) events. Writes log messages as Event Tracing for Windows (ETW) events. Only Windows platforms.
gcplogs Google Cloud Platform (GCP) Logging. Writes log messages to Google Cloud Platform (GCP) Logging.
logentries Rapid7 Logentries. Writes log messages to Rapid7 Logentries.

The docker logs command is available only for the json-file and journald logging drivers. For detailed information on working with logging drivers, see Configure logging drivers.
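For example, the following command (the image is arbitrary) disables logging entirely for a container:

$ docker run -it --log-driver=none ubuntu:14.04 /bin/bash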

Overriding Dockerfile image defaults

When a developer builds an image from a Dockerfile or when she commits it, the developer can set a number of default parameters that take effect when the image starts up as a container.

Four of the Dockerfile commands cannot be overridden at runtime: FROM, MAINTAINER, RUN, and ADD. Everything else has a corresponding override in docker run. We’ll go through what the developer might have set in each Dockerfile instruction and how the operator can override that setting.

CMD (default command or options)

Recall the optional COMMAND in the Docker commandline:

$ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
+

This command is optional because the person who created the IMAGE may have already provided a default COMMAND using the Dockerfile CMD instruction. As the operator (the person running a container from the image), you can override that CMD instruction just by specifying a new COMMAND.

If the image also specifies an ENTRYPOINT then the CMD or COMMAND get appended as arguments to the ENTRYPOINT.
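For example, the following command (the image is arbitrary; its default CMD is assumed to be a shell) overrides the image's default command simply by appending a new one:

$ docker run -it ubuntu:14.04 /bin/ls -l /tmp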

ENTRYPOINT (default command to execute at runtime)

    --entrypoint="": Overwrite the default entrypoint set by the image
+

The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a string to specify the new ENTRYPOINT. Here is an example of how to run a shell in a container that has been set up to automatically run something else (like /usr/bin/redis-server):

$ docker run -it --entrypoint /bin/bash example/redis
+

or two examples of how to pass more parameters to that ENTRYPOINT:

$ docker run -it --entrypoint /bin/bash example/redis -c ls -l
+$ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help
+

You can reset a container’s entrypoint by passing an empty string, for example:

$ docker run -it --entrypoint="" mysql bash
+

Note

Passing --entrypoint will clear out any default command set on the image (i.e. any CMD instruction in the Dockerfile used to build it).

EXPOSE (incoming ports)

The following run command options work with container networking:

--expose=[]: Expose a port or a range of ports inside the container.
+             These are additional to those exposed by the `EXPOSE` instruction
+-P         : Publish all exposed ports to the host interfaces
+-p=[]      : Publish a container's port or a range of ports to the host
+               format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort
+               Both hostPort and containerPort can be specified as a
+               range of ports. When specifying ranges for both, the
+               number of container ports in the range must match the
+               number of host ports in the range, for example:
+                   -p 1234-1236:1234-1236/tcp
+
+               When specifying a range for hostPort only, the
+               containerPort must not be a range.  In this case the
+               container port is published somewhere within the
+               specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`)
+
+               (use 'docker port' to see the actual mapping)
+
+--link=""  : Add link to another container (<name or id>:alias or <name or id>)
+

With the exception of the EXPOSE directive, an image developer hasn’t got much control over networking. The EXPOSE instruction defines the initial incoming ports that provide services. These ports are available to processes inside the container. An operator can use the --expose option to add to the exposed ports.

To expose a container’s internal port, an operator can start the container with the -P or -p flag. The exposed port is accessible on the host and the ports are available to any client that can reach the host.

The -P option publishes all the ports to the host interfaces. Docker binds each exposed port to a random port on the host. The range of ports is within an ephemeral port range defined by /proc/sys/net/ipv4/ip_local_port_range. Use the -p flag to explicitly map a single port or range of ports.

The port number inside the container (where the service listens) does not need to match the port number exposed on the outside of the container (where clients connect). For example, inside the container an HTTP service is listening on port 80 (and so the image developer specifies EXPOSE 80 in the Dockerfile). At runtime, the port might be bound to 42800 on the host. To find the mapping between the host ports and the exposed ports, use docker port.
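As an illustrative sketch (the nginx image, container name, port numbers, and output shown are assumptions), you could publish container port 80 on host port 8080 and then query the mapping:

$ docker run -d -p 8080:80 --name webserver nginx
$ docker port webserver 80
0.0.0.0:8080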

If the operator uses --link when starting a new client container in the default bridge network, then the client container can access the exposed port via a private networking interface. If --link is used when starting a container in a user-defined network as described in Networking overview, it will provide a named alias for the container being linked to.

ENV (environment variables)

Docker automatically sets some environment variables when creating a Linux container. Docker does not set any environment variables when creating a Windows container.

The following environment variables are set for Linux containers:

Variable Value
HOME Set based on the value of USER
HOSTNAME The hostname associated with the container
PATH Includes popular directories, such as /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
TERM xterm if the container is allocated a pseudo-TTY

Additionally, the operator can set any environment variable in the container by using one or more -e flags, even overriding those mentioned above, or already defined by the developer with a Dockerfile ENV. If the operator names an environment variable without specifying a value, then the current value of the named variable is propagated into the container’s environment:

$ export today=Wednesday
+$ docker run -e "deep=purple" -e today --rm alpine env
+
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+HOSTNAME=d2219b854598
+deep=purple
+today=Wednesday
+HOME=/root
+
PS C:\> docker run --rm -e "foo=bar" microsoft/nanoserver cmd /s /c set
+ALLUSERSPROFILE=C:\ProgramData
+APPDATA=C:\Users\ContainerAdministrator\AppData\Roaming
+CommonProgramFiles=C:\Program Files\Common Files
+CommonProgramFiles(x86)=C:\Program Files (x86)\Common Files
+CommonProgramW6432=C:\Program Files\Common Files
+COMPUTERNAME=C2FAEFCC8253
+ComSpec=C:\Windows\system32\cmd.exe
+foo=bar
+LOCALAPPDATA=C:\Users\ContainerAdministrator\AppData\Local
+NUMBER_OF_PROCESSORS=8
+OS=Windows_NT
+Path=C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Users\ContainerAdministrator\AppData\Local\Microsoft\WindowsApps
+PATHEXT=.COM;.EXE;.BAT;.CMD
+PROCESSOR_ARCHITECTURE=AMD64
+PROCESSOR_IDENTIFIER=Intel64 Family 6 Model 62 Stepping 4, GenuineIntel
+PROCESSOR_LEVEL=6
+PROCESSOR_REVISION=3e04
+ProgramData=C:\ProgramData
+ProgramFiles=C:\Program Files
+ProgramFiles(x86)=C:\Program Files (x86)
+ProgramW6432=C:\Program Files
+PROMPT=$P$G
+PUBLIC=C:\Users\Public
+SystemDrive=C:
+SystemRoot=C:\Windows
+TEMP=C:\Users\ContainerAdministrator\AppData\Local\Temp
+TMP=C:\Users\ContainerAdministrator\AppData\Local\Temp
+USERDOMAIN=User Manager
+USERNAME=ContainerAdministrator
+USERPROFILE=C:\Users\ContainerAdministrator
+windir=C:\Windows
+

Similarly, the operator can set the HOSTNAME (Linux) or COMPUTERNAME (Windows) with -h.
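For example (the hostname value is illustrative):

$ docker run --rm -h my-container ubuntu:14.04 hostname
my-container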

HEALTHCHECK

  --health-cmd            Command to run to check health
+  --health-interval       Time between running the check
+  --health-retries        Consecutive failures needed to report unhealthy
+  --health-timeout        Maximum time to allow one check to run
+  --health-start-period   Start period for the container to initialize before starting health-retries countdown
+  --no-healthcheck        Disable any container-specified HEALTHCHECK
+

Example:

+$ docker run --name=test -d \
+    --health-cmd='stat /etc/passwd || exit 1' \
+    --health-interval=2s \
+    busybox sleep 1d
+$ sleep 2; docker inspect --format='{{.State.Health.Status}}' test
+healthy
+$ docker exec test rm /etc/passwd
+$ sleep 2; docker inspect --format='{{json .State.Health}}' test
+{
+  "Status": "unhealthy",
+  "FailingStreak": 3,
+  "Log": [
+    {
+      "Start": "2016-05-25T17:22:04.635478668Z",
+      "End": "2016-05-25T17:22:04.7272552Z",
+      "ExitCode": 0,
+      "Output": "  File: /etc/passwd\n  Size: 334       \tBlocks: 8          IO Block: 4096   regular file\nDevice: 32h/50d\tInode: 12          Links: 1\nAccess: (0664/-rw-rw-r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..."
+    },
+    {
+      "Start": "2016-05-25T17:22:06.732900633Z",
+      "End": "2016-05-25T17:22:06.822168935Z",
+      "ExitCode": 0,
+      "Output": "  File: /etc/passwd\n  Size: 334       \tBlocks: 8          IO Block: 4096   regular file\nDevice: 32h/50d\tInode: 12          Links: 1\nAccess: (0664/-rw-rw-r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..."
+    },
+    {
+      "Start": "2016-05-25T17:22:08.823956535Z",
+      "End": "2016-05-25T17:22:08.897359124Z",
+      "ExitCode": 1,
+      "Output": "stat: can't stat '/etc/passwd': No such file or directory\n"
+    },
+    {
+      "Start": "2016-05-25T17:22:10.898802931Z",
+      "End": "2016-05-25T17:22:10.969631866Z",
+      "ExitCode": 1,
+      "Output": "stat: can't stat '/etc/passwd': No such file or directory\n"
+    },
+    {
+      "Start": "2016-05-25T17:22:12.971033523Z",
+      "End": "2016-05-25T17:22:13.082015516Z",
+      "ExitCode": 1,
+      "Output": "stat: can't stat '/etc/passwd': No such file or directory\n"
+    }
+  ]
+}
+
+

The health status is also displayed in the docker ps output.

TMPFS (mount tmpfs filesystems)

--tmpfs=[]: Create a tmpfs mount with: container-dir[:<options>],
+            where the options are identical to the Linux
+            'mount -t tmpfs -o' command.
+

The example below mounts an empty tmpfs into the container with the rw, noexec, nosuid, and size=65536k options.

$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image
+

VOLUME (shared filesystems)

-v, --volume=[host-src:]container-dest[:<options>]: Bind mount a volume.
+The comma-delimited `options` are [rw|ro], [z|Z],
+[[r]shared|[r]slave|[r]private], and [nocopy].
+The 'host-src' is an absolute path or a name value.
+
+If neither 'rw' or 'ro' is specified then the volume is mounted in
+read-write mode.
+
+The `nocopy` mode is used to disable automatically copying the requested volume
+path in the container to the volume storage location.
+For named volumes, `copy` is the default mode. Copy modes are not supported
+for bind-mounted volumes.
+
+--volumes-from="": Mount all volumes from the given container(s)
+

Note

When using systemd to manage the Docker daemon’s start and stop, in the systemd unit file there is an option to control mount propagation for the Docker daemon itself, called MountFlags. The value of this setting may cause Docker to not see mount propagation changes made on the mount point. For example, if this value is slave, you may not be able to use the shared or rshared propagation on a volume.

The volumes commands are complex enough to have their own documentation in section Use volumes. A developer can define one or more VOLUME’s associated with an image, but only the operator can give access from one container to another (or from a container to a volume mounted on the host).

The container-dest must always be an absolute path such as /src/docs. The host-src can either be an absolute path or a name value. If you supply an absolute path for the host-src, Docker bind-mounts to the path you specify. If you supply a name, Docker creates a named volume by that name.

A name value must start with an alphanumeric character, followed by a-z0-9, _ (underscore), . (period) or - (hyphen). An absolute path starts with a / (forward slash).

For example, you can specify either /foo or foo for a host-src value. If you supply the /foo value, Docker creates a bind mount. If you supply the foo specification, Docker creates a named volume.
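To make the distinction concrete, the following commands (paths and names are illustrative) create a bind mount and a named volume respectively:

$ docker run -it -v /src/docs:/docs ubuntu:14.04 /bin/bash
$ docker run -it -v mydocs:/docs ubuntu:14.04 /bin/bash

The first command bind-mounts the host directory /src/docs into the container, while the second creates (or reuses) a named volume called mydocs.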

USER

root (id = 0) is the default user within a container. The image developer can create additional users. Those users are accessible by name. When passing a numeric ID, the user does not have to exist in the container.

The developer can set a default user to run the first process with the Dockerfile USER instruction. When starting a container, the operator can override the USER instruction by passing the -u option.

-u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command.
+
+The following examples are all valid:
+--user=[ user | user:group | uid | uid:gid | user:gid | uid:group ]
+

Note: if you pass a numeric uid, it must be in the range of 0-2147483647.
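For example, the following command (busybox is just a convenient small image) runs id as uid 1000 and gid 1000, neither of which needs to exist in the image:

$ docker run --rm -u 1000:1000 busybox id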

WORKDIR

The default working directory for running binaries within a container is the root directory (/). It is possible to set a different working directory with the Dockerfile WORKDIR command. The operator can override this with:

-w="", --workdir="": Working directory inside the container
+
+
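For example (busybox is an arbitrary small image):

$ docker run --rm -w /tmp busybox pwd
/tmp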

diff --git a/devdocs/docker/engine%2Frelease-notes%2Findex.html b/devdocs/docker/engine%2Frelease-notes%2Findex.html new file mode 100644 index 00000000..54e76712 --- /dev/null +++ b/devdocs/docker/engine%2Frelease-notes%2Findex.html @@ -0,0 +1,217 @@ +

Docker Engine release notes


This document describes the latest changes, additions, known issues, and fixes for Docker Engine.

Version 20.10

20.10.16

2022-05-12

This release of Docker Engine fixes a regression in the Docker CLI builds for macOS, fixes an issue with docker stats when using containerd 1.5 and up, and updates the Go runtime to include a fix for CVE-2022-29526.

Client

Daemon

Packaging

20.10.15

2022-05-05

This release of Docker Engine comes with updated versions of the compose, buildx, containerd, and runc components, as well as some minor bug fixes.

Known issues

We’ve identified an issue with the macOS CLI binaries in the 20.10.15 release. This issue has been resolved in the 20.10.16 release.

Daemon

Packaging

20.10.14

2022-03-23

This release of Docker Engine updates the default inheritable capabilities for containers to address CVE-2022-24769. A new version of the containerd.io runtime is also included to address the same issue.

Daemon

Builder

Packaging

20.10.13

2022-03-10

This release of Docker Engine contains some bug-fixes and packaging changes, updates to the docker scan and docker buildx commands, an updated version of the Go runtime, and new versions of the containerd.io runtime. Together with this release, we now also provide .deb and .rpm packages of Docker Compose V2, which can be installed using the (optional) docker-compose-plugin package.

Builder

Daemon

Distribution

Documentation

Logging

Packaging

20.10.12

2021-12-13

This release of Docker Engine contains changes in packaging only, and provides updates to the docker scan and docker buildx commands. Versions of docker scan before v0.11.0 are not able to detect the Log4j 2 CVE-2021-44228. We are shipping an updated version of docker scan in this release to help you scan your images for this vulnerability.

Note

The docker scan command on Linux is currently only supported on x86 platforms. We do not yet provide a package for other hardware architectures on Linux.

The docker scan feature is provided as a separate package and, depending on your upgrade or installation method, docker scan may not be updated automatically to the latest version. Use the instructions below to update docker scan to the latest version. You can also use these instructions to install or upgrade the docker scan package without upgrading the Docker Engine:

On .deb based distros, such as Ubuntu and Debian:

$ apt-get update && apt-get install docker-scan-plugin
+

On rpm-based distros, such as CentOS or Fedora:

$ yum install docker-scan-plugin
+

After upgrading, verify you have the latest version of docker scan installed:

$ docker scan --accept-license --version
+Version:    v0.12.0
+Git commit: 1074dd0
+Provider:   Snyk (1.790.0 (standalone))
+

Read our blog post on CVE-2021-44228 to learn how to use the docker scan command to check if images are vulnerable.

Packaging

20.10.11

2021-11-17

IMPORTANT

Due to net/http changes in Go 1.16, HTTP proxies configured through the $HTTP_PROXY environment variable are no longer used for TLS (https://) connections. Make sure you also set an $HTTPS_PROXY environment variable for handling requests to https:// URLs.

Refer to the HTTP/HTTPS proxy section to learn how to configure the Docker Daemon to use a proxy server.

Distribution

Windows

Packaging

20.10.10

2021-10-25

IMPORTANT

Due to net/http changes in Go 1.16, HTTP proxies configured through the $HTTP_PROXY environment variable are no longer used for TLS (https://) connections. Make sure you also set an $HTTPS_PROXY environment variable for handling requests to https:// URLs.

Refer to the HTTP/HTTPS proxy section to learn how to configure the Docker Daemon to use a proxy server.

Builder

Runtime

Swarm

Packaging

20.10.9

2021-10-04

This release is a security release with security fixes in the CLI, runtime, as well as updated versions of the containerd.io package.

IMPORTANT

Due to net/http changes in Go 1.16, HTTP proxies configured through the $HTTP_PROXY environment variable are no longer used for TLS (https://) connections. Make sure you also set an $HTTPS_PROXY environment variable for handling requests to https:// URLs.

Refer to the HTTP/HTTPS proxy section to learn how to configure the Docker Daemon to use a proxy server.

Client

Runtime

Packaging

Known issue

The ctr binary shipping with the static packages of this release is not statically linked, and will not run in Docker images using alpine as a base image. Users can install the libc6-compat package, or download a previous version of the ctr binary as a workaround. Refer to the containerd ticket related to this issue for more details: containerd/containerd#5824.

20.10.8

2021-08-03

IMPORTANT

Due to net/http changes in Go 1.16, HTTP proxies configured through the $HTTP_PROXY environment variable are no longer used for TLS (https://) connections. Make sure you also set an $HTTPS_PROXY environment variable for handling requests to https:// URLs.

Refer to the HTTP/HTTPS proxy section to learn how to configure the Docker Daemon to use a proxy server.

Deprecation

Client

Rootless

Runtime

Swarm

Packaging

Known issue

The ctr binary shipping with the static packages of this release is not statically linked, and will not run in Docker images using alpine as a base image. Users can install the libc6-compat package, or download a previous version of the ctr binary as a workaround. Refer to the containerd ticket related to this issue for more details: containerd/containerd#5824.

20.10.7

2021-06-02

Client

Builder

Logging

Rootless

Networking

Contrib

Packaging

20.10.6

2021-04-12

Client

Builder

Logging

Networking

Packaging

Plugins

Rootless

20.10.5

2021-03-02

Client

20.10.4

2021-02-26

Builder

Client

Runtime

Logger

Rootless

Security

Swarm

20.10.3

2021-02-01

Security

Client

20.10.2

2021-01-04

Runtime

Networking

Swarm

Packaging

20.10.1

2020-12-14

Builder

Packaging

20.10.0

2020-12-08

Deprecation / Removal

For an overview of all deprecated features, refer to the Deprecated Engine Features page.

API

Builder

Client

Logging

Runtime

Networking

Packaging

Rootless

Security

Swarm

diff --git a/devdocs/docker/engine%2Fscan%2Findex.html b/devdocs/docker/engine%2Fscan%2Findex.html new file mode 100644 index 00000000..0b7a378e --- /dev/null +++ b/devdocs/docker/engine%2Fscan%2Findex.html @@ -0,0 +1,275 @@ +

Vulnerability scanning for Docker local images


Looking to speed up your development cycles? Quickly detect and learn how to remediate CVEs in your images by running docker scan IMAGE_NAME. Check out How to scan images for details.

Vulnerability scanning for Docker local images allows developers and development teams to review the security state of their container images and take action to fix issues identified during the scan, resulting in more secure deployments. Docker Scan runs on the Snyk engine, providing users with visibility into the security posture of their local Dockerfiles and local images.

Users trigger vulnerability scans through the CLI, and use the CLI to view the scan results. The scan results contain a list of Common Vulnerabilities and Exposures (CVEs), the sources, such as OS packages and libraries, versions in which they were introduced, and a recommended fixed version (if available) to remediate the CVEs discovered.

Log4j 2 CVE-2021-44228

Versions of Docker Scan earlier than v0.11.0 are not able to detect Log4j 2 CVE-2021-44228. You must update your Docker Desktop installation to 4.3.1 or higher to fix this issue. For more information, see Scan images for Log4j 2 CVE.

For information about the system requirements to run vulnerability scanning, see Prerequisites.

This page contains information about the docker scan CLI command. For information about automatically scanning Docker images through Docker Hub, see Hub Vulnerability Scanning.

Scan images for Log4j 2 CVE

Docker Scan versions earlier than v0.11.0 do not detect Log4j 2 CVE-2021-44228 when you scan your images for vulnerabilities. You must update your Docker installation to the latest version to fix this issue.

If you are using the docker scan plugin shipped with Docker Desktop, update Docker Desktop to version 4.3.1 or higher. See the release notes for Mac and Windows for download information.

If you are using Linux, run the following command to manually install the latest version of docker scan:

On .deb based distros, such as Ubuntu and Debian:

$ apt-get update && apt-get install docker-scan-plugin
+

On rpm-based distros, such as CentOS or Fedora:

$ yum install docker-scan-plugin
+

Alternatively, you can manually download the docker scan binaries from the Docker Scan GitHub repository and install in the plugins directory.

Verify the docker scan version

After upgrading docker scan, verify you are running the latest version by running the following command:

$ docker scan --accept-license --version
+Version:    v0.12.0
+Git commit: 1074dd0
+Provider:   Snyk (1.790.0 (standalone))
+

If your code output contains ORGAPACHELOGGINGLOG4J, it is likely that your code is affected by the Log4j 2 CVE-2021-44228 vulnerability. When you run the updated version of docker scan, you should also see a message in the output log similar to:

Upgrade org.apache.logging.log4j:log4j-core@2.14.0 to org.apache.logging.log4j:log4j-core@2.15.0 to fix
+✗ Arbitrary Code Execution (new) [Critical Severity][https://snyk.io/vuln/SNYK-JAVA-ORGAPACHELOGGINGLOG4J-2314720] in org.apache.logging.log4j:log4j-core@2.14.0
+introduced by org.apache.logging.log4j:log4j-core@2.14.0
+

For more information, read our blog post Apache Log4j 2 CVE-2021-44228.

How to scan images

The docker scan command allows you to scan existing Docker images using the image name or ID. For example, run the following command to scan the hello-world image:

$ docker scan hello-world
+
+Testing hello-world...
+
+Organization:      docker-desktop-test
+Package manager:   linux
+Project name:      docker-image|hello-world
+Docker image:      hello-world
+Licenses:          enabled
+
+✓ Tested 0 dependencies for known issues, no vulnerable paths found.
+
+Note that we do not currently have vulnerability data for your image.
+

Get a detailed scan report

You can get a detailed scan report about a Docker image by providing the Dockerfile used to create the image. The syntax is docker scan --file PATH_TO_DOCKERFILE DOCKER_IMAGE.

For example, if you apply the option to the docker-scan test image, it displays the following result:

$ docker scan --file Dockerfile docker-scan:e2e
+Testing docker-scan:e2e
+...
+✗ High severity vulnerability found in perl
+  Description: Integer Overflow or Wraparound
+  Info: https://snyk.io/vuln/SNYK-DEBIAN10-PERL-570802
+  Introduced through: git@1:2.20.1-2+deb10u3, meta-common-packages@meta
+  From: git@1:2.20.1-2+deb10u3 > perl@5.28.1-6
+  From: git@1:2.20.1-2+deb10u3 > liberror-perl@0.17027-2 > perl@5.28.1-6
+  From: git@1:2.20.1-2+deb10u3 > perl@5.28.1-6 > perl/perl-modules-5.28@5.28.1-6
+  and 3 more...
+  Introduced by your base image (golang:1.14.6)
+
+Organization:      docker-desktop-test
+Package manager:   deb
+Target file:       Dockerfile
+Project name:      docker-image|99138c65ebc7
+Docker image:      99138c65ebc7
+Base image:        golang:1.14.6
+Licenses:          enabled
+
+Tested 200 dependencies for known issues, found 157 issues.
+
+According to our scan, you are currently using the most secure version of the selected base image
+

Excluding the base image

When using docker scan with the --file flag, you can also add the --exclude-base flag. This excludes vulnerabilities introduced by the base image (specified in the Dockerfile using the FROM directive) from your report. For example:

$ docker scan --file Dockerfile --exclude-base docker-scan:e2e
+Testing docker-scan:e2e
+...
+✗ Medium severity vulnerability found in libidn2/libidn2-0
+  Description: Improper Input Validation
+  Info: https://snyk.io/vuln/SNYK-DEBIAN10-LIBIDN2-474100
+  Introduced through: iputils/iputils-ping@3:20180629-2+deb10u1, wget@1.20.1-1.1, curl@7.64.0-4+deb10u1, git@1:2.20.1-2+deb10u3
+  From: iputils/iputils-ping@3:20180629-2+deb10u1 > libidn2/libidn2-0@2.0.5-1+deb10u1
+  From: wget@1.20.1-1.1 > libidn2/libidn2-0@2.0.5-1+deb10u1
+  From: curl@7.64.0-4+deb10u1 > curl/libcurl4@7.64.0-4+deb10u1 > libidn2/libidn2-0@2.0.5-1+deb10u1
+  and 3 more...
+  Introduced in your Dockerfile by 'RUN apk add -U --no-cache wget tar'
+
+
+
+Organization:      docker-desktop-test
+Package manager:   deb
+Target file:       Dockerfile
+Project name:      docker-image|99138c65ebc7
+Docker image:      99138c65ebc7
+Base image:        golang:1.14.6
+Licenses:          enabled
+
+Tested 200 dependencies for known issues, found 16 issues.
+

Viewing the JSON output

You can also display the scan result as a JSON output by adding the --json flag to the command. For example:

$ docker scan --json hello-world
+{
+  "vulnerabilities": [],
+  "ok": true,
+  "dependencyCount": 0,
+  "org": "docker-desktop-test",
+  "policy": "# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.\nversion: v1.19.0\nignore: {}\npatch: {}\n",
+  "isPrivate": true,
+  "licensesPolicy": {
+    "severities": {},
+    "orgLicenseRules": {
+      "AGPL-1.0": {
+        "licenseType": "AGPL-1.0",
+        "severity": "high",
+        "instructions": ""
+      },
+      ...
+      "SimPL-2.0": {
+        "licenseType": "SimPL-2.0",
+        "severity": "high",
+        "instructions": ""
+      }
+    }
+  },
+  "packageManager": "linux",
+  "ignoreSettings": null,
+  "docker": {
+    "baseImageRemediation": {
+      "code": "SCRATCH_BASE_IMAGE",
+      "advice": [
+        {
+          "message": "Note that we do not currently have vulnerability data for your image.",
+          "bold": true,
+          "color": "yellow"
+        }
+      ]
+    },
+    "binariesVulns": {
+      "issuesData": {},
+      "affectedPkgs": {}
+    }
+  },
+  "summary": "No known vulnerabilities",
+  "filesystemPolicy": false,
+  "uniqueCount": 0,
+  "projectName": "docker-image|hello-world",
+  "path": "hello-world"
+}
+

In addition to the --json flag, you can also use the --group-issues flag to display a vulnerability only once in the scan report:

$ docker scan --json --group-issues docker-scan:e2e
+{
+    {
+      "title": "Improper Check for Dropped Privileges",
+      ...
+      "packageName": "bash",
+      "language": "linux",
+      "packageManager": "debian:10",
+      "description": "## Overview\nAn issue was discovered in disable_priv_mode in shell.c in GNU Bash through 5.0 patch 11. By default, if Bash is run with its effective UID not equal to its real UID, it will drop privileges by setting its effective UID to its real UID. However, it does so incorrectly. On Linux and other systems that support \"saved UID\" functionality, the saved UID is not dropped. An attacker with command execution in the shell can use \"enable -f\" for runtime loading of a new builtin, which can be a shared object that calls setuid() and therefore regains privileges. However, binaries running with an effective UID of 0 are unaffected.\n\n## References\n- [CONFIRM](https://security.netapp.com/advisory/ntap-20200430-0003/)\n- [Debian Security Tracker](https://security-tracker.debian.org/tracker/CVE-2019-18276)\n- [GitHub Commit](https://github.com/bminor/bash/commit/951bdaad7a18cc0dc1036bba86b18b90874d39ff)\n- [MISC](http://packetstormsecurity.com/files/155498/Bash-5.0-Patch-11-Privilege-Escalation.html)\n- [MISC](https://www.youtube.com/watch?v=-wGtxJ8opa8)\n- [Ubuntu CVE Tracker](http://people.ubuntu.com/~ubuntu-security/cve/CVE-2019-18276)\n",
+      "identifiers": {
+        "ALTERNATIVE": [],
+        "CVE": [
+          "CVE-2019-18276"
+        ],
+        "CWE": [
+          "CWE-273"
+        ]
+      },
+      "severity": "low",
+      "severityWithCritical": "low",
+      "cvssScore": 7.8,
+      "CVSSv3": "CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H/E:F",
+      ...
+      "from": [
+        "docker-image|docker-scan@e2e",
+        "bash@5.0-4"
+      ],
+      "upgradePath": [],
+      "isUpgradable": false,
+      "isPatchable": false,
+      "name": "bash",
+      "version": "5.0-4"
+    },
+    ...
+    "summary": "880 vulnerable dependency paths",
+      "filesystemPolicy": false,
+      "filtered": {
+        "ignore": [],
+        "patch": []
+      },
+      "uniqueCount": 158,
+      "projectName": "docker-image|docker-scan",
+      "platform": "linux/amd64",
+      "path": "docker-scan:e2e"
+}
+

You can find all the sources of the vulnerability in the from section.

Checking the dependency tree

To view the dependency tree of your image, use the --dependency-tree flag. This displays all the dependencies before the scan result. For example:

$ docker scan --dependency-tree debian:buster
+
+$ docker-image|99138c65ebc7 @ latest
+     ├─ ca-certificates @ 20200601~deb10u1
+     │  └─ openssl @ 1.1.1d-0+deb10u3
+     │     └─ openssl/libssl1.1 @ 1.1.1d-0+deb10u3
+     ├─ curl @ 7.64.0-4+deb10u1
+     │  └─ curl/libcurl4 @ 7.64.0-4+deb10u1
+     │     ├─ e2fsprogs/libcom-err2 @ 1.44.5-1+deb10u3
+     │     ├─ krb5/libgssapi-krb5-2 @ 1.17-3
+     │     │  ├─ e2fsprogs/libcom-err2 @ 1.44.5-1+deb10u3
+     │     │  ├─ krb5/libk5crypto3 @ 1.17-3
+     │     │  │  └─ krb5/libkrb5support0 @ 1.17-3
+     │     │  ├─ krb5/libkrb5-3 @ 1.17-3
+     │     │  │  ├─ e2fsprogs/libcom-err2 @ 1.44.5-1+deb10u3
+     │     │  │  ├─ krb5/libk5crypto3 @ 1.17-3
+     │     │  │  ├─ krb5/libkrb5support0 @ 1.17-3
+     │     │  │  └─ openssl/libssl1.1 @ 1.1.1d-0+deb10u3
+     │     │  └─ krb5/libkrb5support0 @ 1.17-3
+     │     ├─ libidn2/libidn2-0 @ 2.0.5-1+deb10u1
+     │     │  └─ libunistring/libunistring2 @ 0.9.10-1
+     │     ├─ krb5/libk5crypto3 @ 1.17-3
+     │     ├─ krb5/libkrb5-3 @ 1.17-3
+     │     ├─ openldap/libldap-2.4-2 @ 2.4.47+dfsg-3+deb10u2
+     │     │  ├─ gnutls28/libgnutls30 @ 3.6.7-4+deb10u4
+     │     │  │  ├─ nettle/libhogweed4 @ 3.4.1-1
+     │     │  │  │  └─ nettle/libnettle6 @ 3.4.1-1
+     │     │  │  ├─ libidn2/libidn2-0 @ 2.0.5-1+deb10u1
+     │     │  │  ├─ nettle/libnettle6 @ 3.4.1-1
+     │     │  │  ├─ p11-kit/libp11-kit0 @ 0.23.15-2
+     │     │  │  │  └─ libffi/libffi6 @ 3.2.1-9
+     │     │  │  ├─ libtasn1-6 @ 4.13-3
+     │     │  │  └─ libunistring/libunistring2 @ 0.9.10-1
+     │     │  ├─ cyrus-sasl2/libsasl2-2 @ 2.1.27+dfsg-1+deb10u1
+     │     │  │  └─ cyrus-sasl2/libsasl2-modules-db @ 2.1.27+dfsg-1+deb10u1
+     │     │  │     └─ db5.3/libdb5.3 @ 5.3.28+dfsg1-0.5
+     │     │  └─ openldap/libldap-common @ 2.4.47+dfsg-3+deb10u2
+     │     ├─ nghttp2/libnghttp2-14 @ 1.36.0-2+deb10u1
+     │     ├─ libpsl/libpsl5 @ 0.20.2-2
+     │     │  ├─ libidn2/libidn2-0 @ 2.0.5-1+deb10u1
+     │     │  └─ libunistring/libunistring2 @ 0.9.10-1
+     │     ├─ rtmpdump/librtmp1 @ 2.4+20151223.gitfa8646d.1-2
+     │     │  ├─ gnutls28/libgnutls30 @ 3.6.7-4+deb10u4
+     │     │  ├─ nettle/libhogweed4 @ 3.4.1-1
+     │     │  └─ nettle/libnettle6 @ 3.4.1-1
+     │     ├─ libssh2/libssh2-1 @ 1.8.0-2.1
+     │     │  └─ libgcrypt20 @ 1.8.4-5
+     │     └─ openssl/libssl1.1 @ 1.1.1d-0+deb10u3
+     ├─ gnupg2/dirmngr @ 2.2.12-1+deb10u1
+    ...
+
+Organization:      docker-desktop-test
+Package manager:   deb
+Project name:      docker-image|99138c65ebc7
+Docker image:      99138c65ebc7
+Licenses:          enabled
+
+Tested 200 dependencies for known issues, found 157 issues.
+
+For more free scans that keep your images secure, sign up to Snyk at https://dockr.ly/3ePqVcp.
+

For more information about the vulnerability data, see Docker Vulnerability Scanning CLI Cheat Sheet.

Limiting the level of vulnerabilities displayed

Docker scan lets you choose the level of vulnerabilities displayed in your scan report using the --severity flag. You can set the severity flag to low, medium, or high depending on the level of vulnerabilities you’d like to see in your report.
For example, if you set the severity level as medium, the scan report displays all vulnerabilities that are classified as medium and high.

$ docker scan --severity=medium docker-scan:e2e 
+./bin/docker-scan_darwin_amd64 scan --severity=medium docker-scan:e2e
+
+Testing docker-scan:e2e...
+
+✗ Medium severity vulnerability found in sqlite3/libsqlite3-0
+  Description: Divide By Zero
+  Info: https://snyk.io/vuln/SNYK-DEBIAN10-SQLITE3-466337
+  Introduced through: gnupg2/gnupg@2.2.12-1+deb10u1, subversion@1.10.4-1+deb10u1, mercurial@4.8.2-1+deb10u1
+  From: gnupg2/gnupg@2.2.12-1+deb10u1 > gnupg2/gpg@2.2.12-1+deb10u1 > sqlite3/libsqlite3-0@3.27.2-3
+  From: subversion@1.10.4-1+deb10u1 > subversion/libsvn1@1.10.4-1+deb10u1 > sqlite3/libsqlite3-0@3.27.2-3
+  From: mercurial@4.8.2-1+deb10u1 > python-defaults/python@2.7.16-1 > python2.7@2.7.16-2+deb10u1 > python2.7/libpython2.7-stdlib@2.7.16-2+deb10u1 > sqlite3/libsqlite3-0@3.27.2-3
+
+✗ Medium severity vulnerability found in sqlite3/libsqlite3-0
+  Description: Uncontrolled Recursion
+...
+✗ High severity vulnerability found in binutils/binutils-common
+  Description: Missing Release of Resource after Effective Lifetime
+  Info: https://snyk.io/vuln/SNYK-DEBIAN10-BINUTILS-403318
+  Introduced through: gcc-defaults/g++@4:8.3.0-1
+  From: gcc-defaults/g++@4:8.3.0-1 > gcc-defaults/gcc@4:8.3.0-1 > gcc-8@8.3.0-6 > binutils@2.31.1-16 > binutils/binutils-common@2.31.1-16
+  From: gcc-defaults/g++@4:8.3.0-1 > gcc-defaults/gcc@4:8.3.0-1 > gcc-8@8.3.0-6 > binutils@2.31.1-16 > binutils/libbinutils@2.31.1-16 > binutils/binutils-common@2.31.1-16
+  From: gcc-defaults/g++@4:8.3.0-1 > gcc-defaults/gcc@4:8.3.0-1 > gcc-8@8.3.0-6 > binutils@2.31.1-16 > binutils/binutils-x86-64-linux-gnu@2.31.1-16 > binutils/binutils-common@2.31.1-16
+  and 4 more...
+
+Organization:      docker-desktop-test
+Package manager:   deb
+Project name:      docker-image|docker-scan
+Docker image:      docker-scan:e2e
+Platform:          linux/amd64
+Licenses:          enabled
+
+Tested 200 dependencies for known issues, found 37 issues.
+

Provider authentication

If you have an existing Snyk account, you can directly use your Snyk API token:

$ docker scan --login --token SNYK_AUTH_TOKEN
+
+Your account has been authenticated. Snyk is now ready to be used.
+

If you use the --login flag without any token, you will be redirected to the Snyk website to login.

Prerequisites

To run vulnerability scanning on your Docker images, you must meet the following requirements:

  1. Download and install the latest version of Docker Desktop.

  2. Sign into Docker Hub.

  3. From the Docker Desktop menu, select Sign in / Create Docker ID. Alternatively, open a terminal and run the command docker login.

  4. (Optional) You can create a Snyk account for scans, or use the additional monthly free scans provided by Snyk with your Docker Hub account.

Check your installation by running docker scan --version; it should print the current version of docker scan and the Snyk engine version. For example:

$ docker scan --version
+Version:    v0.5.0
+Git commit: 5a09266
+Provider:   Snyk (1.432.0)
+

Note:

Docker Scan uses the Snyk binary installed in your environment by default. If this is not available, it uses the Snyk binary embedded in Docker Desktop. The minimum version required for Snyk is 1.385.0.

Supported options

The high-level docker scan command scans local images using the image name or the image ID. It supports the following options:

Option Description
--accept-license Accept the license agreement of the third-party scanning provider
--dependency-tree Display the dependency tree of the image along with scan results
--exclude-base Exclude the base image during scanning. This option requires the --file option to be set
-f, --file string Specify the location of the Dockerfile associated with the image. This option displays a detailed scan result
--json Display the result of the scan in JSON format
--login Log into Snyk using an optional token (using the flag --token), or by using a web-based token
--reject-license Reject the license agreement of the third-party scanning provider
--severity string Only report vulnerabilities of provided level or higher (low, medium, high)
--token string Use the authentication token to log into the third-party scanning provider
--version Display the Docker Scan plugin version
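
These options can be combined in a single invocation. As an illustrative sketch (the image name myapp:latest and the Dockerfile path are placeholders, not taken from the examples above), the following scans a locally built image against its Dockerfile, excludes vulnerabilities introduced by the base image, and reports only high-severity issues:

$ docker scan --file Dockerfile --exclude-base --severity=high myapp:latest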

Known issues

WSL 2

Feedback

Your feedback is very important to us. Let us know your feedback by creating an issue in the scan-cli-plugin GitHub repository.

diff --git a/devdocs/docker/engine%2Fsecurity%2Fapparmor%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Fapparmor%2Findex.html new file mode 100644 index 00000000..a146da07 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Fapparmor%2Findex.html @@ -0,0 +1,134 @@ +

AppArmor security profiles for Docker


AppArmor (Application Armor) is a Linux security module that protects an operating system and its applications from security threats. To use it, a system administrator associates an AppArmor security profile with each program. Docker expects to find an AppArmor policy loaded and enforced.

Docker automatically generates and loads a default profile for containers named docker-default. The Docker binary generates this profile in tmpfs and then loads it into the kernel.

Note: This profile is used on containers, not on the Docker Daemon.

A profile for the Docker Engine daemon exists but it is not currently installed with the deb packages. If you are interested in the source for the daemon profile, it is located in contrib/apparmor in the Docker Engine source repository.

Understand the policies

The docker-default profile is the default for running containers. It is moderately protective while providing wide application compatibility. The profile is generated from the following template.

When you run a container, it uses the docker-default policy unless you override it with the security-opt option. For example, the following explicitly specifies the default policy:

$ docker run --rm -it --security-opt apparmor=docker-default hello-world
+

Load and unload profiles

To load a new profile into AppArmor for use with containers:

$ apparmor_parser -r -W /path/to/your_profile
+

Then, run the custom profile with --security-opt like so:

$ docker run --rm -it --security-opt apparmor=your_profile hello-world
+

To unload a profile from AppArmor:

# unload the profile
+$ apparmor_parser -R /path/to/profile
+

Resources for writing profiles

The syntax for file globbing in AppArmor differs somewhat from other globbing implementations. We highly recommend reviewing the resources below on AppArmor profile syntax.

Nginx example profile

In this example, you create a custom AppArmor profile for Nginx. Below is the custom profile.

#include <tunables/global>
+
+
+profile docker-nginx flags=(attach_disconnected,mediate_deleted) {
+  #include <abstractions/base>
+
+  network inet tcp,
+  network inet udp,
+  network inet icmp,
+
+  deny network raw,
+
+  deny network packet,
+
+  file,
+  umount,
+
+  deny /bin/** wl,
+  deny /boot/** wl,
+  deny /dev/** wl,
+  deny /etc/** wl,
+  deny /home/** wl,
+  deny /lib/** wl,
+  deny /lib64/** wl,
+  deny /media/** wl,
+  deny /mnt/** wl,
+  deny /opt/** wl,
+  deny /proc/** wl,
+  deny /root/** wl,
+  deny /sbin/** wl,
+  deny /srv/** wl,
+  deny /tmp/** wl,
+  deny /sys/** wl,
+  deny /usr/** wl,
+
+  audit /** w,
+
+  /var/run/nginx.pid w,
+
+  /usr/sbin/nginx ix,
+
+  deny /bin/dash mrwklx,
+  deny /bin/sh mrwklx,
+  deny /usr/bin/top mrwklx,
+
+
+  capability chown,
+  capability dac_override,
+  capability setuid,
+  capability setgid,
+  capability net_bind_service,
+
+  deny @{PROC}/* w,   # deny write for all files directly in /proc (not in a subdir)
+  # deny write to files not in /proc/<number>/** or /proc/sys/**
+  deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
+  deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
+  deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/mem rwklx,
+  deny @{PROC}/kmem rwklx,
+  deny @{PROC}/kcore rwklx,
+
+  deny mount,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+}
+
  1. Save the custom profile to disk in the /etc/apparmor.d/containers/docker-nginx file.

    The file path in this example is not a requirement. In production, you could use another.

  2. Load the profile.

    $ sudo apparmor_parser -r -W /etc/apparmor.d/containers/docker-nginx
    +
  3. Run a container with the profile.

    To run nginx in detached mode:

    $ docker run --security-opt "apparmor=docker-nginx" \
    +     -p 80:80 -d --name apparmor-nginx nginx
    +
  4. Exec into the running container.

    $ docker container exec -it apparmor-nginx bash
    +
  5. Try some operations to test the profile.

    root@6da5a2a930b9:~# ping 8.8.8.8
    +ping: Lacking privilege for raw socket.
    +
    +root@6da5a2a930b9:/# top
    +bash: /usr/bin/top: Permission denied
    +
    +root@6da5a2a930b9:~# touch ~/thing
    +touch: cannot touch 'thing': Permission denied
    +
    +root@6da5a2a930b9:/# sh
    +bash: /bin/sh: Permission denied
    +
    +root@6da5a2a930b9:/# dash
    +bash: /bin/dash: Permission denied
    +

Congratulations! You just deployed a container secured with a custom AppArmor profile!

Debug AppArmor

You can use dmesg to debug problems and aa-status to check the loaded profiles.

Use dmesg

Here are some helpful tips for debugging any problems you might be facing with regard to AppArmor.

AppArmor sends quite verbose messaging to dmesg. Usually an AppArmor line looks like the following:

[ 5442.864673] audit: type=1400 audit(1453830992.845:37): apparmor="ALLOWED" operation="open" profile="/usr/bin/docker" name="/home/jessie/docker/man/man1/docker-attach.1" pid=10923 comm="docker" requested_mask="r" denied_mask="r" fsuid=1000 ouid=0
+

In the above example, you can see profile=/usr/bin/docker. This means the user has the docker-engine (Docker Engine Daemon) profile loaded.

Look at another log line:

[ 3256.689120] type=1400 audit(1405454041.341:73): apparmor="DENIED" operation="ptrace" profile="docker-default" pid=17651 comm="docker" requested_mask="receive" denied_mask="receive"
+

This time the profile is docker-default, which is run on containers by default unless in privileged mode. This line shows that AppArmor has denied ptrace in the container. This is exactly as expected.

Use aa-status

If you need to check which profiles are loaded, you can use aa-status. The output looks like:

$ sudo aa-status
+apparmor module is loaded.
+14 profiles are loaded.
+1 profiles are in enforce mode.
+   docker-default
+13 profiles are in complain mode.
+   /usr/bin/docker
+   /usr/bin/docker///bin/cat
+   /usr/bin/docker///bin/ps
+   /usr/bin/docker///sbin/apparmor_parser
+   /usr/bin/docker///sbin/auplink
+   /usr/bin/docker///sbin/blkid
+   /usr/bin/docker///sbin/iptables
+   /usr/bin/docker///sbin/mke2fs
+   /usr/bin/docker///sbin/modprobe
+   /usr/bin/docker///sbin/tune2fs
+   /usr/bin/docker///sbin/xtables-multi
+   /usr/bin/docker///sbin/zfs
+   /usr/bin/docker///usr/bin/xz
+38 processes have profiles defined.
+37 processes are in enforce mode.
+   docker-default (6044)
+   ...
+   docker-default (31899)
+1 processes are in complain mode.
+   /usr/bin/docker (29756)
+0 processes are unconfined but have a profile defined.
+

The above output shows that the docker-default profile running on various container PIDs is in enforce mode. This means AppArmor is actively blocking and auditing in dmesg anything outside the bounds of the docker-default profile.

The output above also shows the /usr/bin/docker (Docker Engine daemon) profile is running in complain mode. This means AppArmor only logs to dmesg activity outside the bounds of the profile. (Except in the case of Ubuntu Trusty, where some interesting behaviors are enforced.)

Contribute Docker’s AppArmor code

Advanced users and package managers can find a profile for /usr/bin/docker (Docker Engine Daemon) underneath contrib/apparmor in the Docker Engine source repository.

The docker-default profile for containers lives in profiles/apparmor.

diff --git a/devdocs/docker/engine%2Fsecurity%2Fcertificates%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Fcertificates%2Findex.html new file mode 100644 index 00000000..e003da40 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Fcertificates%2Findex.html @@ -0,0 +1,24 @@ +

Verify repository client with certificates


In Running Docker with HTTPS, you learned that, by default, Docker runs via a non-networked Unix socket and TLS must be enabled in order to have the Docker client and the daemon communicate securely over HTTPS. TLS ensures authenticity of the registry endpoint and that traffic to and from the registry is encrypted.

This article demonstrates how to ensure the traffic between the Docker registry server and the Docker daemon (a client of the registry server) is encrypted and properly authenticated using certificate-based client-server authentication.

We show you how to install a Certificate Authority (CA) root certificate for the registry and how to set the client TLS certificate for verification.

Understand the configuration

A custom certificate is configured by creating a directory under /etc/docker/certs.d using the same name as the registry’s hostname, such as localhost. All *.crt files are added to this directory as CA roots.

Note

On Linux, any root certificate authorities are merged with the system defaults, including the host’s root CA set. If you are running Docker on Windows Server, or Docker Desktop for Windows with Windows containers, the system default certificates are only used when no custom root certificates are configured.

The presence of one or more <filename>.key/cert pairs indicates to Docker that there are custom certificates required for access to the desired repository.

Note: If multiple certificates exist, each is tried in alphabetical order. If there is a 4xx-level or 5xx-level authentication error, Docker continues to try with the next certificate.

The following illustrates a configuration with custom certificates:

    /etc/docker/certs.d/        <-- Certificate directory
+    └── localhost:5000          <-- Hostname:port
+       ├── client.cert          <-- Client certificate
+       ├── client.key           <-- Client key
+       └── ca.crt               <-- Certificate authority that signed
+                                    the registry certificate
+

The preceding example is operating-system specific and is for illustrative purposes only. You should consult your operating system documentation for creating an OS-provided bundled certificate chain.
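
On a Linux host, for instance, creating this layout is mostly a matter of making the directory and copying the files into it; a minimal sketch, assuming a registry at localhost:5000 and certificate files you have already generated:

$ sudo mkdir -p /etc/docker/certs.d/localhost:5000
$ sudo cp ca.crt client.cert client.key /etc/docker/certs.d/localhost:5000/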

Create the client certificates

Use OpenSSL’s genrsa and req commands to first generate an RSA key and then use the key to create the certificate.

$ openssl genrsa -out client.key 4096
+$ openssl req -new -x509 -text -key client.key -out client.cert
+

Note: These TLS commands only generate a working set of certificates on Linux. The version of OpenSSL in macOS is incompatible with the type of certificate Docker requires.

Troubleshooting tips

The Docker daemon interprets .crt files as CA certificates and .cert files as client certificates. If a CA certificate is accidentally given the extension .cert instead of the correct .crt extension, the Docker daemon logs the following error message:

Missing key KEY_NAME for client certificate CERT_NAME. CA certificates should use the extension .crt.
+

If the Docker registry is accessed without a port number, do not add the port to the directory name. The following shows the configuration for a registry on default port 443 which is accessed with docker login my-https.registry.example.com:

    /etc/docker/certs.d/
+    └── my-https.registry.example.com          <-- Hostname without port
+       ├── client.cert
+       ├── client.key
+       └── ca.crt
+
diff --git a/devdocs/docker/engine%2Fsecurity%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Findex.html new file mode 100644 index 00000000..a6482035 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Findex.html @@ -0,0 +1,11 @@ +

Docker security


There are four major areas to consider when reviewing Docker security:

Kernel namespaces

Docker containers are very similar to LXC containers, and they have similar security features. When you start a container with docker run, behind the scenes Docker creates a set of namespaces and control groups for the container.

Namespaces provide the first and most straightforward form of isolation: processes running within a container cannot see, let alone affect, processes running in another container or in the host system.

Each container also gets its own network stack, meaning that a container doesn’t get privileged access to the sockets or interfaces of another container. Of course, if the host system is set up accordingly, containers can interact with each other through their respective network interfaces, just like they can interact with external hosts. When you specify public ports for your containers or use links, IP traffic is allowed between containers. They can ping each other, send/receive UDP packets, and establish TCP connections, but that can be restricted if necessary. From a network architecture point of view, all containers on a given Docker host are sitting on bridge interfaces. This means that they are just like physical machines connected through a common Ethernet switch; no more, no less.

How mature is the code providing kernel namespaces and private networking? Kernel namespaces were introduced between kernel version 2.6.15 and 2.6.26. This means that since July 2008 (the date of the 2.6.26 release), namespace code has been exercised and scrutinized on a large number of production systems. And there is more: the design and inspiration for the namespaces code are even older. Namespaces are actually an effort to reimplement the features of OpenVZ in such a way that they could be merged within the mainstream kernel. And OpenVZ was initially released in 2005, so both the design and the implementation are pretty mature.

Control groups

Control Groups are another key component of Linux Containers. They implement resource accounting and limiting. They provide many useful metrics, but they also help ensure that each container gets its fair share of memory, CPU, disk I/O; and, more importantly, that a single container cannot bring the system down by exhausting one of those resources.

So while they do not play a role in preventing one container from accessing or affecting the data and processes of another container, they are essential to fend off some denial-of-service attacks. They are particularly important on multi-tenant platforms, like public and private PaaS, to guarantee a consistent uptime (and performance) even when some applications start to misbehave.

Control Groups have been around for a while as well: the code was started in 2006, and initially merged in kernel 2.6.24.

Docker daemon attack surface

Running containers (and applications) with Docker implies running the Docker daemon. This daemon requires root privileges unless you opt in to Rootless mode, and you should therefore be aware of some important details.

First of all, only trusted users should be allowed to control your Docker daemon. This is a direct consequence of some powerful Docker features. Specifically, Docker allows you to share a directory between the Docker host and a guest container; and it allows you to do so without limiting the access rights of the container. This means that you can start a container where the /host directory is the / directory on your host; and the container can alter your host filesystem without any restriction. This is similar to how virtualization systems allow filesystem resource sharing. Nothing prevents you from sharing your root filesystem (or even your root block device) with a virtual machine.

This has a strong security implication: for example, if you instrument Docker from a web server to provision containers through an API, you should be even more careful than usual with parameter checking, to make sure that a malicious user cannot pass crafted parameters causing Docker to create arbitrary containers.

For this reason, the REST API endpoint (used by the Docker CLI to communicate with the Docker daemon) changed in Docker 0.5.2, and now uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the latter being prone to cross-site request forgery attacks if you happen to run Docker directly on your local machine, outside of a VM). You can then use traditional UNIX permission checks to limit access to the control socket.
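
In practice, access to the socket is governed by ordinary file ownership and group membership; a sketch of inspecting and granting that access (the username alice is a placeholder, and the root:docker 660 ownership noted in the comment is the typical packaged default rather than a guarantee):

$ stat -c '%U:%G %a' /var/run/docker.sock   # typically root:docker 660
$ sudo usermod -aG docker alice             # grant access by adding the user to the docker group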

You can also expose the REST API over HTTP if you explicitly decide to do so. However, if you do that, be aware of the above-mentioned security implications. Note that even if you have a firewall to limit access to the REST API endpoint from other hosts in the network, the endpoint can still be accessible from containers, which can easily result in privilege escalation. Therefore it is mandatory to secure API endpoints with HTTPS and certificates. It is also recommended to ensure that it is reachable only from a trusted network or VPN.

You can also use DOCKER_HOST=ssh://USER@HOST or ssh -L /path/to/docker.sock:/var/run/docker.sock instead if you prefer SSH over TLS.
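
As a sketch of the second form, you can forward the remote daemon socket to a local UNIX socket over SSH and point the client at it (the paths, user, and host are placeholders, and this assumes an OpenSSH version with UNIX-socket forwarding support):

$ ssh -nNT -L /tmp/remote-docker.sock:/var/run/docker.sock docker-user@host1.example.com &
$ DOCKER_HOST=unix:///tmp/remote-docker.sock docker info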

The daemon is also potentially vulnerable to other inputs, such as image loading from either disk with docker load, or from the network with docker pull. As of Docker 1.3.2, images are extracted in a chrooted subprocess on Linux/Unix platforms, as a first step in a wider effort toward privilege separation. As of Docker 1.10.0, all images are stored and accessed by the cryptographic checksums of their contents, limiting the possibility of an attacker causing a collision with an existing image.

Finally, if you run Docker on a server, it is recommended to run Docker exclusively on the server and to move all other services within containers controlled by Docker. Of course, it is fine to keep your favorite admin tools (probably at least an SSH server), as well as existing monitoring/supervision processes, such as NRPE and collectd.

Linux kernel capabilities

By default, Docker starts containers with a restricted set of capabilities. What does that mean?

Capabilities turn the binary “root/non-root” dichotomy into a fine-grained access control system. Processes (like web servers) that just need to bind on a port below 1024 do not need to run as root: they can just be granted the net_bind_service capability instead. And there are many other capabilities, for almost all the specific areas where root privileges are usually needed.

This means a lot for container security; let’s see why!

Typical servers run several processes as root, including the SSH daemon, cron daemon, logging daemons, kernel modules, network configuration tools, and more. A container is different, because almost all of those tasks are handled by the infrastructure around the container:

This means that in most cases, containers do not need “real” root privileges at all, and can therefore run with a reduced capability set; meaning that “root” within a container has far fewer privileges than the real “root”. For instance, it is possible to:

This means that even if an intruder manages to escalate to root within a container, it is much harder to do serious damage, or to escalate to the host.

This doesn’t affect regular web apps, but reduces the vectors of attack by malicious users considerably. By default Docker drops all capabilities except those needed, an allowlist instead of a denylist approach. You can see a full list of available capabilities in Linux manpages.

One primary risk with running Docker containers is that the default set of capabilities and mounts given to a container may provide incomplete isolation, either independently, or when used in combination with kernel vulnerabilities.

Docker supports the addition and removal of capabilities, allowing use of a non-default profile. This may make Docker more secure through capability removal, or less secure through the addition of capabilities. The best practice for users would be to remove all capabilities except those explicitly required for their processes.
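
For example, a common pattern is to drop every capability and add back only what the workload needs; a sketch for a service that only has to bind a privileged port (the image name my-web-image is a placeholder):

$ docker run -d --cap-drop ALL --cap-add NET_BIND_SERVICE -p 80:80 my-web-image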

Docker Content Trust Signature Verification

The Docker Engine can be configured to only run signed images. The Docker Content Trust signature verification feature is built directly into the dockerd binary.
This is configured in the dockerd configuration file.

To enable this feature, trustpinning can be configured in daemon.json, whereby only repositories signed with a user-specified root key can be pulled and run.

This feature gives administrators more insight into and control over image signature verification than was previously available through the CLI.

For more information on configuring Docker Content Trust Signature Verification, go to Content trust in Docker.

Other kernel security features

Capabilities are just one of the many security features provided by modern Linux kernels. It is also possible to leverage existing, well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with Docker.

While Docker currently only enables capabilities, it doesn’t interfere with the other systems. This means that there are many different ways to harden a Docker host. Here are a few examples.

Just as you can use third-party tools to augment Docker containers, including special network topologies or shared filesystems, tools exist to harden Docker containers without the need to modify Docker itself.

As of Docker 1.10, User Namespaces are supported directly by the docker daemon. This feature allows the root user in a container to be mapped to a non-uid-0 user outside the container, which can help mitigate the risk of container breakout. This facility is available but not enabled by default.

Refer to the daemon command in the command line reference for more information on this feature. Additional information on the implementation of User Namespaces in Docker can be found in this blog post.
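
As a minimal sketch, the userns-remap facility can be switched on through /etc/docker/daemon.json followed by a daemon restart; the snippet below uses the built-in "default" mapping (which creates and uses the dockremap user) and is illustrative rather than a complete configuration:

{
  "userns-remap": "default"
}

$ sudo systemctl restart docker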

Conclusions

Docker containers are, by default, quite secure, especially if you run your processes as non-privileged users inside the container.

You can add an extra layer of safety by enabling AppArmor, SELinux, GRSEC, or another appropriate hardening system.

If you think of ways to make Docker more secure, we welcome feature requests, pull requests, or comments on the Docker community forums.

diff --git a/devdocs/docker/engine%2Fsecurity%2Fprotect-access%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Fprotect-access%2Findex.html new file mode 100644 index 00000000..11c02fa1 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Fprotect-access%2Findex.html @@ -0,0 +1,117 @@ +

Protect the Docker daemon socket


By default, Docker runs through a non-networked UNIX socket. It can also optionally communicate using SSH or a TLS (HTTPS) socket.

Use SSH to protect the Docker daemon socket

Note

The given USERNAME must have permissions to access the docker socket on the remote machine. Refer to manage Docker as a non-root user to learn how to give a non-root user access to the docker socket.

The following example creates a docker context to connect with a remote dockerd daemon on host1.example.com using SSH, and as the docker-user user on the remote machine:

$ docker context create \
+    --docker host=ssh://docker-user@host1.example.com \
+    --description="Remote engine" \
+    my-remote-engine
+
+my-remote-engine
+Successfully created context "my-remote-engine"
+

After creating the context, use docker context use to switch the docker CLI to use it, and to connect to the remote engine:

$ docker context use my-remote-engine
+my-remote-engine
+Current context is now "my-remote-engine"
+
+$ docker info
+<prints output of the remote engine>
+

Use the default context to switch back to the default (local) daemon:

$ docker context use default
+default
+Current context is now "default"
+

Alternatively, use the DOCKER_HOST environment variable to temporarily switch the docker CLI to connect to the remote host using SSH. This does not require creating a context, and can be useful to create an ad-hoc connection with a different engine:

$ export DOCKER_HOST=ssh://docker-user@host1.example.com
+$ docker info
+<prints output of the remote engine>
+

SSH Tips

For the best user experience with SSH, configure ~/.ssh/config as follows to allow reusing an SSH connection for multiple invocations of the docker CLI:

ControlMaster     auto
+ControlPath       ~/.ssh/control-%C
+ControlPersist    yes
+

Use TLS (HTTPS) to protect the Docker daemon socket

If you need Docker to be reachable through HTTP rather than SSH in a safe manner, you can enable TLS (HTTPS) by specifying the tlsverify flag and pointing Docker’s tlscacert flag to a trusted CA certificate.

In daemon mode, this only allows connections from clients authenticated by a certificate signed by that CA. In client mode, it only connects to servers with a certificate signed by that CA.

Advanced topic

Using TLS and managing a CA is an advanced topic. Please familiarize yourself with OpenSSL, x509, and TLS before using it in production.

Create a CA, server and client keys with OpenSSL

Note: Replace all instances of $HOST in the following example with the DNS name of your Docker daemon’s host.

First, on the Docker daemon’s host machine, generate CA private and public keys:

$ openssl genrsa -aes256 -out ca-key.pem 4096
+Generating RSA private key, 4096 bit long modulus
+..............................................................................++
+........++
+e is 65537 (0x10001)
+Enter pass phrase for ca-key.pem:
+Verifying - Enter pass phrase for ca-key.pem:
+
+$ openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem
+Enter pass phrase for ca-key.pem:
+You are about to be asked to enter information that will be incorporated
+into your certificate request.
+What you are about to enter is what is called a Distinguished Name or a DN.
+There are quite a few fields but you can leave some blank
+For some fields there will be a default value,
+If you enter '.', the field will be left blank.
+-----
+Country Name (2 letter code) [AU]:
+State or Province Name (full name) [Some-State]:Queensland
+Locality Name (eg, city) []:Brisbane
+Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc
+Organizational Unit Name (eg, section) []:Sales
+Common Name (e.g. server FQDN or YOUR name) []:$HOST
+Email Address []:Sven@home.org.au
+

Now that you have a CA, you can create a server key and certificate signing request (CSR). Make sure that “Common Name” matches the hostname you use to connect to Docker:

Note: Replace all instances of $HOST in the following example with the DNS name of your Docker daemon’s host.

$ openssl genrsa -out server-key.pem 4096
+Generating RSA private key, 4096 bit long modulus
+.....................................................................++
+.................................................................................................++
+e is 65537 (0x10001)
+
+$ openssl req -subj "/CN=$HOST" -sha256 -new -key server-key.pem -out server.csr
+

Next, we’re going to sign the public key with our CA:

Since TLS connections can be made through IP address as well as DNS name, the IP addresses need to be specified when creating the certificate. For example, to allow connections using 10.10.10.20 and 127.0.0.1:

$ echo subjectAltName = DNS:$HOST,IP:10.10.10.20,IP:127.0.0.1 >> extfile.cnf
+

Set the Docker daemon key’s extended usage attributes to be used only for server authentication:

$ echo extendedKeyUsage = serverAuth >> extfile.cnf
+

Now, generate the signed certificate:

$ openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem \
+  -CAcreateserial -out server-cert.pem -extfile extfile.cnf
+Signature ok
+subject=/CN=your.host.com
+Getting CA Private Key
+Enter pass phrase for ca-key.pem:
+

Authorization plugins offer more fine-grained control to supplement authentication from mutual TLS. In addition to other information described in the above document, authorization plugins running on a Docker daemon receive the certificate information for connecting Docker clients.

For client authentication, create a client key and certificate signing request:

Note: For simplicity of the next couple of steps, you may perform this step on the Docker daemon’s host machine as well.

$ openssl genrsa -out key.pem 4096
+Generating RSA private key, 4096 bit long modulus
+.........................................................++
+................++
+e is 65537 (0x10001)
+
+$ openssl req -subj '/CN=client' -new -key key.pem -out client.csr
+

To make the key suitable for client authentication, create a new extensions config file:

$ echo extendedKeyUsage = clientAuth > extfile-client.cnf
+

Now, generate the signed certificate:

$ openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem \
+  -CAcreateserial -out cert.pem -extfile extfile-client.cnf
+Signature ok
+subject=/CN=client
+Getting CA Private Key
+Enter pass phrase for ca-key.pem:
+

After generating cert.pem and server-cert.pem you can safely remove the two certificate signing requests and extensions config files:

$ rm -v client.csr server.csr extfile.cnf extfile-client.cnf
+

With a default umask of 022, your secret keys are world-readable and writable for you and your group.

To protect your keys from accidental damage, remove their write permissions. To make them only readable by you, change file modes as follows:

$ chmod -v 0400 ca-key.pem key.pem server-key.pem
+

Certificates can be world-readable, but you might want to remove write access to prevent accidental damage:

$ chmod -v 0444 ca.pem server-cert.pem cert.pem
+

Now you can make the Docker daemon only accept connections from clients providing a certificate trusted by your CA:

$ dockerd \
+    --tlsverify \
+    --tlscacert=ca.pem \
+    --tlscert=server-cert.pem \
+    --tlskey=server-key.pem \
+    -H=0.0.0.0:2376
+

To connect to Docker and validate its certificate, provide your client keys, certificates and trusted CA:

Run it on the client machine

This step should be run on your Docker client machine. As such, you need to copy your CA certificate, your server certificate, and your client certificate to that machine.

Note: Replace all instances of $HOST in the following example with the DNS name of your Docker daemon’s host.

$ docker --tlsverify \
+    --tlscacert=ca.pem \
+    --tlscert=cert.pem \
+    --tlskey=key.pem \
+    -H=$HOST:2376 version
+

Note: Docker over TLS should run on TCP port 2376.

Warning: As shown in the example above, you don’t need to run the docker client with sudo or the docker group when you use certificate authentication. That means anyone with the keys can give any instructions to your Docker daemon, giving them root access to the machine hosting the daemon. Guard these keys as you would a root password!

Secure by default

If you want to secure your Docker client connections by default, you can move the files to the .docker directory in your home directory, and set the DOCKER_HOST and DOCKER_TLS_VERIFY variables as well (instead of passing -H=tcp://$HOST:2376 and --tlsverify on every call).

$ mkdir -pv ~/.docker
+$ cp -v {ca,cert,key}.pem ~/.docker
+
+$ export DOCKER_HOST=tcp://$HOST:2376 DOCKER_TLS_VERIFY=1
+

Docker now connects securely by default:

$ docker ps
+

Other modes

If you don’t want to have complete two-way authentication, you can run Docker in various other modes by mixing the flags.

Daemon modes

Client modes

If found, the client sends its client certificate, so you just need to drop your keys into ~/.docker/{ca,cert,key}.pem. Alternatively, if you want to store your keys in another location, you can specify that location using the environment variable DOCKER_CERT_PATH.

$ export DOCKER_CERT_PATH=~/.docker/zone1/
+$ docker --tlsverify ps
+

Connecting to the secure Docker port using curl

To use curl to make test API requests, you need to use three extra command line flags:

$ curl https://$HOST:2376/images/json \
+  --cert ~/.docker/cert.pem \
+  --key ~/.docker/key.pem \
+  --cacert ~/.docker/ca.pem
+
diff --git a/devdocs/docker/engine%2Fsecurity%2Frootless%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Frootless%2Findex.html new file mode 100644 index 00000000..9f8a4f22 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Frootless%2Findex.html @@ -0,0 +1,123 @@ +

Run the Docker daemon as a non-root user (Rootless mode)


Rootless mode allows running the Docker daemon and containers as a non-root user to mitigate potential vulnerabilities in the daemon and the container runtime.

Rootless mode does not require root privileges even during the installation of the Docker daemon, as long as the prerequisites are met.

Rootless mode was introduced in Docker Engine v19.03 as an experimental feature. Rootless mode graduated from experimental in Docker Engine v20.10.

How it works

Rootless mode executes the Docker daemon and containers inside a user namespace. This is very similar to userns-remap mode, except that with userns-remap mode, the daemon itself is running with root privileges, whereas in rootless mode, both the daemon and the container are running without root privileges.

Rootless mode does not use binaries with SETUID bits or file capabilities, except newuidmap and newgidmap, which are needed to allow multiple UIDs/GIDs to be used in the user namespace.

Prerequisites

$ id -u
+1001
+$ whoami
+testuser
+$ grep ^$(whoami): /etc/subuid
+testuser:231072:65536
+$ grep ^$(whoami): /etc/subgid
+testuser:231072:65536
+

Distribution-specific hint

Note: We recommend that you use the Ubuntu kernel.

  • Install dbus-user-session package if not installed. Run sudo apt-get install -y dbus-user-session and relogin.

  • overlay2 storage driver is enabled by default (Ubuntu-specific kernel patch).

  • Known to work on Ubuntu 18.04, 20.04, and 21.04.

  • Install dbus-user-session package if not installed. Run sudo apt-get install -y dbus-user-session and relogin.

  • For Debian 10, add kernel.unprivileged_userns_clone=1 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system. This step is not required on Debian 11.

  • Installing fuse-overlayfs is recommended. Run sudo apt-get install -y fuse-overlayfs. Using the overlay2 storage driver with the Debian-specific modprobe option sudo modprobe overlay permit_mounts_in_userns=1 is also possible; however, it is highly discouraged due to instability.

  • Rootless Docker requires a version of slirp4netns greater than v0.4.0 (when vpnkit is not installed). Check that you have this with:

    $ slirp4netns --version
    +

    If you do not have it, install it with sudo apt-get install -y slirp4netns or download the latest release.

  • Installing fuse-overlayfs is recommended. Run sudo pacman -S fuse-overlayfs.

  • Add kernel.unprivileged_userns_clone=1 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system

  • Installing fuse-overlayfs is recommended. Run sudo zypper install -y fuse-overlayfs.

  • sudo modprobe ip_tables iptable_mangle iptable_nat iptable_filter is required. This might be required on other distros as well depending on the configuration.

  • Known to work on openSUSE 15 and SLES 15.

  • Installing fuse-overlayfs is recommended. Run sudo dnf install -y fuse-overlayfs.

  • You might need sudo dnf install -y iptables.

  • Known to work on CentOS 8, RHEL 8, and Fedora 34.

  • Add user.max_user_namespaces=28633 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system.

  • systemctl --user does not work by default. Run dockerd-rootless.sh directly without systemd.

Known limitations

Install

Note

If the system-wide Docker daemon is already running, consider disabling it: $ sudo systemctl disable --now docker.service docker.socket

If you installed Docker 20.10 or later with RPM/DEB packages, you should have dockerd-rootless-setuptool.sh in /usr/bin.

Run dockerd-rootless-setuptool.sh install as a non-root user to set up the daemon:

$ dockerd-rootless-setuptool.sh install
+[INFO] Creating /home/testuser/.config/systemd/user/docker.service
+...
+[INFO] Installed docker.service successfully.
+[INFO] To control docker.service, run: `systemctl --user (start|stop|restart) docker.service`
+[INFO] To run docker.service on system startup, run: `sudo loginctl enable-linger testuser`
+
+[INFO] Make sure the following environment variables are set (or add them to ~/.bashrc):
+
+export PATH=/usr/bin:$PATH
+export DOCKER_HOST=unix:///run/user/1000/docker.sock
+

If dockerd-rootless-setuptool.sh is not present, you may need to install the docker-ce-rootless-extras package manually, e.g.,

$ sudo apt-get install -y docker-ce-rootless-extras
+

If you do not have permission to run package managers like apt-get and dnf, consider using the installation script available at https://get.docker.com/rootless. Since static packages are not available for s390x, rootless mode is not supported on s390x.

$ curl -fsSL https://get.docker.com/rootless | sh
+...
+[INFO] Creating /home/testuser/.config/systemd/user/docker.service
+...
+[INFO] Installed docker.service successfully.
+[INFO] To control docker.service, run: `systemctl --user (start|stop|restart) docker.service`
+[INFO] To run docker.service on system startup, run: `sudo loginctl enable-linger testuser`
+
+[INFO] Make sure the following environment variables are set (or add them to ~/.bashrc):
+
+export PATH=/home/testuser/bin:$PATH
+export DOCKER_HOST=unix:///run/user/1000/docker.sock
+

The binaries will be installed at ~/bin.

See Troubleshooting if you face an error.

Uninstall

To remove the systemd service of the Docker daemon, run dockerd-rootless-setuptool.sh uninstall:

$ dockerd-rootless-setuptool.sh uninstall
++ systemctl --user stop docker.service
++ systemctl --user disable docker.service
+Removed /home/testuser/.config/systemd/user/default.target.wants/docker.service.
+[INFO] Uninstalled docker.service
+[INFO] This uninstallation tool does NOT remove Docker binaries and data.
+[INFO] To remove data, run: `/usr/bin/rootlesskit rm -rf /home/testuser/.local/share/docker`
+

Unset environment variables PATH and DOCKER_HOST if you have added them to ~/.bashrc.

To remove the data directory, run rootlesskit rm -rf ~/.local/share/docker.

To remove the binaries, remove docker-ce-rootless-extras package if you installed Docker with package managers. If you installed Docker with https://get.docker.com/rootless (Install without packages), remove the binary files under ~/bin:

$ cd ~/bin
+$ rm -f containerd containerd-shim containerd-shim-runc-v2 ctr docker docker-init docker-proxy dockerd dockerd-rootless-setuptool.sh dockerd-rootless.sh rootlesskit rootlesskit-docker-proxy runc vpnkit
+

Usage

Daemon

The systemd unit file is installed as ~/.config/systemd/user/docker.service.

Use systemctl --user to manage the lifecycle of the daemon:

$ systemctl --user start docker
+

To launch the daemon on system startup, enable the systemd service and lingering:

$ systemctl --user enable docker
+$ sudo loginctl enable-linger $(whoami)
+

Starting Rootless Docker as a systemd-wide service (/etc/systemd/system/docker.service) is not supported, even with the User= directive.

To run the daemon directly without systemd, you need to run dockerd-rootless.sh instead of dockerd.

The following environment variables must be set:

  • $HOME: the home directory
  • $XDG_RUNTIME_DIR: an ephemeral directory that is only accessible by the expected user, e.g., ~/.docker/run. The directory should be removed on every host shutdown. The directory can be on tmpfs; however, it should not be under /tmp. Locating this directory under /tmp might be vulnerable to TOCTOU attack.

Remarks about directory paths:

Client

You need to specify either the socket path or the CLI context explicitly.

To specify the socket path using $DOCKER_HOST:

$ export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock
+$ docker run -d -p 8080:80 nginx
+

To specify the CLI context using docker context:

$ docker context use rootless
+rootless
+Current context is now "rootless"
+$ docker run -d -p 8080:80 nginx
+

Best practices

Rootless Docker in Docker

To run Rootless Docker inside “rootful” Docker, use the docker:<version>-dind-rootless image instead of docker:<version>-dind.

$ docker run -d --name dind-rootless --privileged docker:20.10-dind-rootless
+

The docker:<version>-dind-rootless image runs as a non-root user (UID 1000). However, --privileged is required for disabling seccomp, AppArmor, and mount masks.

Expose Docker API socket through TCP

To expose the Docker API socket through TCP, you need to launch dockerd-rootless.sh with DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp".

$ DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="-p 0.0.0.0:2376:2376/tcp" \
+  dockerd-rootless.sh \
+  -H tcp://0.0.0.0:2376 \
+  --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem
+

Expose Docker API socket through SSH

To expose the Docker API socket through SSH, you need to make sure $DOCKER_HOST is set on the remote host.

$ ssh -l <REMOTEUSER> <REMOTEHOST> 'echo $DOCKER_HOST'
+unix:///run/user/1001/docker.sock
+$ docker -H ssh://<REMOTEUSER>@<REMOTEHOST> run ...
+

Routing ping packets

On some distributions, ping does not work by default.

Add net.ipv4.ping_group_range = 0 2147483647 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system to allow using ping.
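
A sketch of doing that with a drop-in file (the file name 99-ping.conf is arbitrary):

$ echo "net.ipv4.ping_group_range = 0 2147483647" | sudo tee /etc/sysctl.d/99-ping.conf
$ sudo sysctl --system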

Exposing privileged ports

To expose privileged ports (< 1024), set CAP_NET_BIND_SERVICE on rootlesskit binary and restart the daemon.

$ sudo setcap cap_net_bind_service=ep $(which rootlesskit)
+$ systemctl --user restart docker
+

Or add net.ipv4.ip_unprivileged_port_start=0 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system.

Limiting resources

Limiting resources with cgroup-related docker run flags such as --cpus, --memory, --pids-limit is supported only when running with cgroup v2 and systemd. See Changing cgroup version to enable cgroup v2.

If docker info shows none as Cgroup Driver, the conditions are not satisfied. When these conditions are not satisfied, rootless mode ignores the cgroup-related docker run flags. See Limiting resources without cgroup for workarounds.

If docker info shows systemd as Cgroup Driver, the conditions are satisfied. However, typically, only memory and pids controllers are delegated to non-root users by default.

$ cat /sys/fs/cgroup/user.slice/user-$(id -u).slice/user@$(id -u).service/cgroup.controllers
+memory pids
+

To allow delegation of all controllers, you need to change the systemd configuration as follows:

# mkdir -p /etc/systemd/system/user@.service.d
+# cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF
+[Service]
+Delegate=cpu cpuset io memory pids
+EOF
+# systemctl daemon-reload
+

Note

Delegating cpuset requires systemd 244 or later.

Limiting resources without cgroup

Even when cgroup is not available, you can still use the traditional ulimit and cpulimit, though they work at process granularity rather than container granularity, and can be arbitrarily disabled by the container process.

For example:
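
As one illustrative sketch of the idea, a per-process virtual-memory cap (roughly comparable to --memory) can be set with ulimit inside the container; the ubuntu image and <COMMAND> are placeholders:

$ docker run --rm ubuntu bash -c 'ulimit -v 65536; exec <COMMAND>'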

Troubleshooting

Errors when starting the Docker daemon

[rootlesskit:parent] error: failed to start the child: fork/exec /proc/self/exe: operation not permitted

This error occurs mostly when the value of /proc/sys/kernel/unprivileged_userns_clone is set to 0:

$ cat /proc/sys/kernel/unprivileged_userns_clone
+0
+

To fix this issue, add kernel.unprivileged_userns_clone=1 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system.

[rootlesskit:parent] error: failed to start the child: fork/exec /proc/self/exe: no space left on device

This error occurs mostly when the value of /proc/sys/user/max_user_namespaces is too small:

$ cat /proc/sys/user/max_user_namespaces
+0
+

To fix this issue, add user.max_user_namespaces=28633 to /etc/sysctl.conf (or /etc/sysctl.d) and run sudo sysctl --system.

[rootlesskit:parent] error: failed to setup UID/GID map: failed to compute uid/gid map: No subuid ranges found for user 1001 (“testuser”)

This error occurs when /etc/subuid and /etc/subgid are not configured. See Prerequisites.

could not get XDG_RUNTIME_DIR

This error occurs when $XDG_RUNTIME_DIR is not set.

On a non-systemd host, you need to create a directory and then set the path:

$ export XDG_RUNTIME_DIR=$HOME/.docker/xrd
+$ rm -rf $XDG_RUNTIME_DIR
+$ mkdir -p $XDG_RUNTIME_DIR
+$ dockerd-rootless.sh
+

Note: You must remove the directory every time you log out.

On a systemd host, log into the host using pam_systemd (see below). The value is automatically set to /run/user/$UID and cleaned up on every logout.

systemctl --user fails with “Failed to connect to bus: No such file or directory”

This error occurs mostly when you switch from the root user to a non-root user with sudo:

# sudo -iu testuser
+$ systemctl --user start docker
+Failed to connect to bus: No such file or directory
+

Instead of sudo -iu <USERNAME>, you need to log in using pam_systemd. For example:
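
One way to get a session that goes through pam_systemd is to SSH into the host as that user, even from the host itself; a sketch with testuser as the placeholder account:

$ ssh testuser@localhost
$ systemctl --user start docker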

The daemon does not start up automatically

You need sudo loginctl enable-linger $(whoami) to enable the daemon to start up automatically. See Usage.

iptables failed: iptables -t nat -N DOCKER: Fatal: can’t open lock file /run/xtables.lock: Permission denied

This error may happen with an older version of Docker when SELinux is enabled on the host.

The issue has been fixed in Docker 20.10.8. A known workaround for older versions of Docker is to run the following commands to disable SELinux for iptables:

$ sudo dnf install -y policycoreutils-python-utils && sudo semanage permissive -a iptables_t
+

docker pull errors

docker: failed to register layer: Error processing tar file(exit status 1): lchown <FILE>: invalid argument

This error occurs when the number of available entries in /etc/subuid or /etc/subgid is not sufficient. The number of entries required varies across images. However, 65,536 entries are sufficient for most images. See Prerequisites.

docker: failed to register layer: ApplyLayer exit status 1 stdout: stderr: lchown <FILE>: operation not permitted

This error occurs mostly when ~/.local/share/docker is located on NFS.

A workaround is to specify a non-NFS data-root directory in ~/.config/docker/daemon.json as follows:

{"data-root":"/somewhere-out-of-nfs"}
+

docker run errors

docker: Error response from daemon: OCI runtime create failed: ...: read unix @->/run/systemd/private: read: connection reset by peer: unknown.

This error occurs on cgroup v2 hosts mostly when the dbus daemon is not running for the user.

$ systemctl --user is-active dbus
+inactive
+
+$ docker run hello-world
+docker: Error response from daemon: OCI runtime create failed: container_linux.go:380: starting container process caused: process_linux.go:385: applying cgroup configuration for process caused: error while starting unit "docker
+-931c15729b5a968ce803784d04c7421f791d87e5ca1891f34387bb9f694c488e.scope" with properties [{Name:Description Value:"libcontainer container 931c15729b5a968ce803784d04c7421f791d87e5ca1891f34387bb9f694c488e"} {Name:Slice Value:"use
+r.slice"} {Name:PIDs Value:@au [4529]} {Name:Delegate Value:true} {Name:MemoryAccounting Value:true} {Name:CPUAccounting Value:true} {Name:IOAccounting Value:true} {Name:TasksAccounting Value:true} {Name:DefaultDependencies Val
+ue:false}]: read unix @->/run/systemd/private: read: connection reset by peer: unknown.
+

To fix the issue, run sudo apt-get install -y dbus-user-session or sudo dnf install -y dbus-daemon, and then relogin.

If the error still occurs, try running systemctl --user enable --now dbus (without sudo).

--cpus, --memory, and --pids-limit are ignored

This is expected behavior in cgroup v1 mode. To use these flags, the host needs to be configured to enable cgroup v2. For more information, see Limiting resources.
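
A minimal sketch of the cgroup v2 setup described in Limiting resources, assuming a GRUB-based distro with systemd (adjust the paths and the boot-loader step for your distro):

# 1. Boot the host with the unified cgroup hierarchy: add
#    systemd.unified_cgroup_hierarchy=1 to GRUB_CMDLINE_LINUX in /etc/default/grub, then:
$ sudo update-grub

# 2. Delegate controllers to non-root users:
$ sudo mkdir -p /etc/systemd/system/user@.service.d
$ cat <<EOF | sudo tee /etc/systemd/system/user@.service.d/delegate.conf
[Service]
Delegate=cpu cpuset io memory pids
EOF
$ sudo systemctl daemon-reload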

Networking errors

docker run -p fails with cannot expose privileged port

docker run -p fails with this error when a privileged port (< 1024) is specified as the host port.

$ docker run -p 80:80 nginx:alpine
+docker: Error response from daemon: driver failed programming external connectivity on endpoint focused_swanson (9e2e139a9d8fc92b37c36edfa6214a6e986fa2028c0cc359812f685173fa6df7): Error starting userland proxy: error while calling PortManager.AddPort(): cannot expose privileged port 80, you might need to add "net.ipv4.ip_unprivileged_port_start=0" (currently 1024) to /etc/sysctl.conf, or set CAP_NET_BIND_SERVICE on rootlesskit binary, or choose a larger port number (>= 1024): listen tcp 0.0.0.0:80: bind: permission denied.
+

When you experience this error, consider using an unprivileged port instead. For example, 8080 instead of 80.

$ docker run -p 8080:80 nginx:alpine
+

To allow exposing privileged ports, see Exposing privileged ports.
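
Both remedies mentioned in the error message can be applied as follows (a sketch; the sysctl.d file name is illustrative):

# Option 1: grant CAP_NET_BIND_SERVICE to the rootlesskit binary, then restart the daemon
$ sudo setcap cap_net_bind_service=ep $(which rootlesskit)
$ systemctl --user restart docker

# Option 2: allow unprivileged users to bind to low ports system-wide
$ echo "net.ipv4.ip_unprivileged_port_start=0" | sudo tee /etc/sysctl.d/99-rootless.conf
$ sudo sysctl --system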

ping doesn’t work

Ping does not work when /proc/sys/net/ipv4/ping_group_range is set to 1 0:

$ cat /proc/sys/net/ipv4/ping_group_range
+1       0
+

For details, see Routing ping packets.
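
A sketch of the fix: widen the range so that the user's groups may create ICMP echo sockets (the range 0 2147483647 covers all groups; the sysctl.d file name is illustrative):

$ echo "net.ipv4.ping_group_range = 0 2147483647" | sudo tee /etc/sysctl.d/99-ping.conf
$ sudo sysctl --system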

IPAddress shown in docker inspect is unreachable

This is an expected behavior, as the daemon is namespaced inside RootlessKit’s network namespace. Use docker run -p instead.

--net=host doesn’t listen ports on the host network namespace

This is an expected behavior, as the daemon is namespaced inside RootlessKit’s network namespace. Use docker run -p instead.

Network is slow

Docker with rootless mode uses slirp4netns as the default network stack if slirp4netns v0.4.0 or later is installed. If slirp4netns is not installed, Docker falls back to VPNKit.

Installing slirp4netns may improve the network throughput. See RootlessKit documentation for the benchmark result.
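
To check whether slirp4netns is available and install it (the package name slirp4netns is assumed to exist in your distribution's repositories), then restart the daemon:

$ slirp4netns --version

# Debian/Ubuntu
$ sudo apt-get install -y slirp4netns

# Fedora/CentOS/RHEL
$ sudo dnf install -y slirp4netns

$ systemctl --user restart docker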

Also, changing the MTU value may improve throughput. The MTU value can be specified by creating ~/.config/systemd/user/docker.service.d/override.conf with the following content:

[Service]
+Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=<INTEGER>"
+

And then restart the daemon:

$ systemctl --user daemon-reload
+$ systemctl --user restart docker
+

docker run -p does not propagate source IP addresses

This is because Docker with rootless mode uses RootlessKit’s builtin port driver by default.

The source IP addresses can be propagated by creating ~/.config/systemd/user/docker.service.d/override.conf with the following content:

[Service]
+Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns"
+

And then restart the daemon:

$ systemctl --user daemon-reload
+$ systemctl --user restart docker
+

Note that this configuration decreases throughput. See RootlessKit documentation for the benchmark result.

Tips for debugging

Entering into dockerd namespaces

The dockerd-rootless.sh script executes dockerd in its own user, mount, and network namespaces.

For debugging, you can enter the namespaces by running nsenter -U --preserve-credentials -n -m -t $(cat $XDG_RUNTIME_DIR/docker.pid).
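
For example, to inspect the network interfaces that the daemon actually sees:

$ nsenter -U --preserve-credentials -n -m -t $(cat $XDG_RUNTIME_DIR/docker.pid)
$ ip addr show   # interfaces inside RootlessKit's network namespace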

diff --git a/devdocs/docker/engine%2Fsecurity%2Fseccomp%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Fseccomp%2Findex.html new file mode 100644 index 00000000..2fdd4fd4 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Fseccomp%2Findex.html @@ -0,0 +1,18 @@ +

Seccomp security profiles for Docker


Secure computing mode (seccomp) is a Linux kernel feature. You can use it to restrict the actions available within the container. The seccomp() system call operates on the seccomp state of the calling process. You can use this feature to restrict your application’s access.

This feature is available only if Docker has been built with seccomp and the kernel is configured with CONFIG_SECCOMP enabled. To check if your kernel supports seccomp:

$ grep CONFIG_SECCOMP= /boot/config-$(uname -r)
+CONFIG_SECCOMP=y
+

Pass a profile for a container

The default seccomp profile provides a sane default for running containers with seccomp and disables around 44 system calls out of 300+. It is moderately protective while providing wide application compatibility. The default Docker profile can be found here.

In effect, the profile is an allowlist which denies access to system calls by default, then allowlists specific system calls. The profile works by defining a defaultAction of SCMP_ACT_ERRNO and overriding that action only for specific system calls. The effect of SCMP_ACT_ERRNO is to cause a Permission Denied error. Next, the profile defines a specific list of system calls which are fully allowed, because their action is overridden to be SCMP_ACT_ALLOW. Finally, some specific rules are for individual system calls such as personality, and others, to allow variants of those system calls with specific arguments.
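
As an illustration of that structure only (this is not the actual default profile, and it is far too restrictive for a real container), a profile could look like this:

{
  "defaultAction": "SCMP_ACT_ERRNO",
  "architectures": ["SCMP_ARCH_X86_64"],
  "syscalls": [
    {
      "names": ["read", "write", "close", "exit", "exit_group", "futex", "rt_sigreturn"],
      "action": "SCMP_ACT_ALLOW"
    }
  ]
}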

seccomp is instrumental for running Docker containers with least privilege. It is not recommended to change the default seccomp profile.

When you run a container, it uses the default profile unless you override it with the --security-opt option. For example, the following explicitly specifies a policy:

$ docker run --rm \
+             -it \
+             --security-opt seccomp=/path/to/seccomp/profile.json \
+             hello-world
+

Significant syscalls blocked by the default profile

Docker’s default seccomp profile is an allowlist which specifies the calls that are allowed. The table below lists the significant (but not all) syscalls that are effectively blocked because they are not on the allowlist. The table includes the reason each syscall is blocked rather than allowlisted.

Syscall Description
acct Accounting syscall which could let containers disable their own resource limits or process accounting. Also gated by CAP_SYS_PACCT.
add_key Prevent containers from using the kernel keyring, which is not namespaced.
bpf Deny loading potentially persistent bpf programs into kernel, already gated by CAP_SYS_ADMIN.
clock_adjtime Time/date is not namespaced. Also gated by CAP_SYS_TIME.
clock_settime Time/date is not namespaced. Also gated by CAP_SYS_TIME.
clone Deny cloning new namespaces. Also gated by CAP_SYS_ADMIN for CLONE_* flags, except CLONE_NEWUSER.
create_module Deny manipulation and functions on kernel modules. Obsolete. Also gated by CAP_SYS_MODULE.
delete_module Deny manipulation and functions on kernel modules. Also gated by CAP_SYS_MODULE.
finit_module Deny manipulation and functions on kernel modules. Also gated by CAP_SYS_MODULE.
get_kernel_syms Deny retrieval of exported kernel and module symbols. Obsolete.
get_mempolicy Syscall that modifies kernel memory and NUMA settings. Already gated by CAP_SYS_NICE.
init_module Deny manipulation and functions on kernel modules. Also gated by CAP_SYS_MODULE.
ioperm Prevent containers from modifying kernel I/O privilege levels. Already gated by CAP_SYS_RAWIO.
iopl Prevent containers from modifying kernel I/O privilege levels. Already gated by CAP_SYS_RAWIO.
kcmp Restrict process inspection capabilities, already blocked by dropping CAP_SYS_PTRACE.
kexec_file_load Sister syscall of kexec_load that does the same thing, slightly different arguments. Also gated by CAP_SYS_BOOT.
kexec_load Deny loading a new kernel for later execution. Also gated by CAP_SYS_BOOT.
keyctl Prevent containers from using the kernel keyring, which is not namespaced.
lookup_dcookie Tracing/profiling syscall, which could leak a lot of information on the host. Also gated by CAP_SYS_ADMIN.
mbind Syscall that modifies kernel memory and NUMA settings. Already gated by CAP_SYS_NICE.
mount Deny mounting, already gated by CAP_SYS_ADMIN.
move_pages Syscall that modifies kernel memory and NUMA settings.
name_to_handle_at Sister syscall to open_by_handle_at. Already gated by CAP_DAC_READ_SEARCH.
nfsservctl Deny interaction with the kernel nfs daemon. Obsolete since Linux 3.1.
open_by_handle_at Cause of an old container breakout. Also gated by CAP_DAC_READ_SEARCH.
perf_event_open Tracing/profiling syscall, which could leak a lot of information on the host.
personality Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns.
pivot_root Deny pivot_root, should be privileged operation.
process_vm_readv Restrict process inspection capabilities, already blocked by dropping CAP_SYS_PTRACE.
process_vm_writev Restrict process inspection capabilities, already blocked by dropping CAP_SYS_PTRACE.
ptrace Tracing/profiling syscall. Blocked in Linux kernel versions before 4.8 to avoid seccomp bypass. Tracing/profiling arbitrary processes is already blocked by dropping CAP_SYS_PTRACE, because it could leak a lot of information on the host.
query_module Deny manipulation and functions on kernel modules. Obsolete.
quotactl Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by CAP_SYS_ADMIN.
reboot Don’t let containers reboot the host. Also gated by CAP_SYS_BOOT.
request_key Prevent containers from using the kernel keyring, which is not namespaced.
set_mempolicy Syscall that modifies kernel memory and NUMA settings. Already gated by CAP_SYS_NICE.
setns Deny associating a thread with a namespace. Also gated by CAP_SYS_ADMIN.
settimeofday Time/date is not namespaced. Also gated by CAP_SYS_TIME.
stime Time/date is not namespaced. Also gated by CAP_SYS_TIME.
swapon Deny start/stop swapping to file/device. Also gated by CAP_SYS_ADMIN.
swapoff Deny start/stop swapping to file/device. Also gated by CAP_SYS_ADMIN.
sysfs Obsolete syscall.
_sysctl Obsolete, replaced by /proc/sys.
umount Should be a privileged operation. Also gated by CAP_SYS_ADMIN.
umount2 Should be a privileged operation. Also gated by CAP_SYS_ADMIN.
unshare Deny cloning new namespaces for processes. Also gated by CAP_SYS_ADMIN, with the exception of unshare --user.
uselib Older syscall related to shared libraries, unused for a long time.
userfaultfd Userspace page fault handling, largely needed for process migration.
ustat Obsolete syscall.
vm86 In kernel x86 real mode virtual machine. Also gated by CAP_SYS_ADMIN.
vm86old In kernel x86 real mode virtual machine. Also gated by CAP_SYS_ADMIN.

Run without the default seccomp profile

You can pass unconfined to run a container without the default seccomp profile.

$ docker run --rm -it --security-opt seccomp=unconfined debian:jessie \
+    unshare --map-root-user --user sh -c whoami
+
diff --git a/devdocs/docker/engine%2Fsecurity%2Ftrust%2Fdeploying_notary%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Fdeploying_notary%2Findex.html new file mode 100644 index 00000000..29ca9ef3 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Fdeploying_notary%2Findex.html @@ -0,0 +1,11 @@ +

Deploy Notary Server with Compose


The easiest way to deploy Notary Server is by using Docker Compose. To follow the procedure on this page, you must have already installed Docker Compose.

  1. Clone the Notary repository.

    git clone https://github.com/theupdateframework/notary.git
    +
  2. Build and start Notary Server with the sample certificates.

    docker-compose up -d
    +

For more detailed documentation about how to deploy Notary Server, see the instructions to run a Notary service and the Notary repository.

  1. Make sure that your Docker or Notary client trusts Notary Server’s certificate before you try to interact with the Notary server.

See the instructions for Docker or for Notary depending on which one you are using.

If you want to use Notary in production

Check back here for instructions after Notary Server has an official stable release. To get a head start on deploying Notary in production, see the Notary repository.

diff --git a/devdocs/docker/engine%2Fsecurity%2Ftrust%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Findex.html new file mode 100644 index 00000000..604b4152 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Findex.html @@ -0,0 +1,69 @@ +

Content trust in Docker


When transferring data among networked systems, trust is a central concern. In particular, when communicating over an untrusted medium such as the internet, it is critical to ensure the integrity and the publisher of all the data a system operates on. You use the Docker Engine to push and pull images (data) to a public or private registry. Content trust gives you the ability to verify both the integrity and the publisher of all the data received from a registry over any channel.

About Docker Content Trust (DCT)

Docker Content Trust (DCT) provides the ability to use digital signatures for data sent to and received from remote Docker registries. These signatures allow client-side or runtime verification of the integrity and publisher of specific image tags.

Through DCT, image publishers can sign their images and image consumers can ensure that the images they pull are signed. Publishers could be individuals or organizations manually signing their content or automated software supply chains signing content as part of their release process.

Image tags and DCT

An individual image record has the following identifier:

[REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG]
+

A particular image REPOSITORY can have multiple tags. For example, latest and 3.1.2 are both tags on the mongo image. An image publisher can build an image and tag combination many times changing the image with each build.

DCT is associated with the TAG portion of an image. Each image repository has a set of keys that image publishers use to sign an image tag. Image publishers have discretion on which tags they sign.

An image repository can contain an image with one tag that is signed and another tag that is not. For example, consider the Mongo image repository. The latest tag could be unsigned while the 3.1.6 tag could be signed. It is the responsibility of the image publisher to decide if an image tag is signed or not. In this representation, some image tags are signed, others are not:

[Figure: Signed tags]

Publishers can choose to sign a specific tag or not. As a result, the content of an unsigned tag and that of a signed tag with the same name may not match. For example, a publisher can push a tagged image someimage:latest and sign it. Later, the same publisher can push an unsigned someimage:latest image. This second push replaces the last unsigned tag latest but does not affect the signed latest version. The ability to choose which tags they sign allows publishers to iterate over the unsigned version of an image before officially signing it.

Image consumers can enable DCT to ensure that images they use were signed. If a consumer enables DCT, they can only pull, run, or build with trusted images. Enabling DCT is a bit like applying a “filter” to your registry. Consumers “see” only signed image tags and the less desirable, unsigned image tags are “invisible” to them.

[Figure: Trust view]

To the consumer who has not enabled DCT, nothing about how they work with Docker images changes. Every image is visible regardless of whether it is signed or not.

Docker Content Trust Keys

Trust for an image tag is managed through the use of signing keys. A key set is created when an operation using DCT is first invoked. A key set consists of the following classes of keys: an offline root key, a repository (targets) key, server-managed snapshot and timestamp keys, and optional delegation keys (see how to manage keys for DCT for details).

The following figure depicts the various signing keys and their relationships:

[Figure: Content Trust components]

WARNING

Loss of the root key is very difficult to recover from. Correcting this loss requires intervention from Docker Support to reset the repository state. This loss also requires manual intervention from every consumer that used a signed tag from this repository prior to the loss.

You should back up the root key somewhere safe. Given that it is only required to create new repositories, it is a good idea to store it offline in hardware. For details on securing and backing up your keys, make sure you read how to manage keys for DCT.

Signing Images with Docker Content Trust

Within the Docker CLI we can sign and push a container image with the $ docker trust command syntax. This is built on top of the Notary feature set. For more information, see the Notary GitHub repository.

A prerequisite for signing an image is a Docker registry with a Notary server attached (such as Docker Hub). Instructions for standing up a self-hosted environment can be found here.

To sign a Docker Image you will need a delegation key pair. These keys can be generated locally using $ docker trust key generate or generated by a certificate authority.

First we will add the delegation private key to the local Docker trust repository. (By default this is stored in ~/.docker/trust/). If you are generating delegation keys with $ docker trust key generate, the private key is automatically added to the local trust store. If you are importing a separate key, you will need to use the $ docker trust key load command.

$ docker trust key generate jeff
+Generating key for jeff...
+Enter passphrase for new jeff key with ID 9deed25:
+Repeat passphrase for new jeff key with ID 9deed25:
+Successfully generated and loaded private key. Corresponding public key available: /home/ubuntu/Documents/mytrustdir/jeff.pub
+

Or if you have an existing key:

$ docker trust key load key.pem --name jeff
+Loading key from "key.pem"...
+Enter passphrase for new jeff key with ID 8ae710e:
+Repeat passphrase for new jeff key with ID 8ae710e:
+Successfully imported key from key.pem
+

Next we will need to add the delegation public key to the Notary server; this is specific to a particular image repository in Notary known as a Global Unique Name (GUN). If this is the first time you are adding a delegation to that repository, this command will also initiate the repository, using a local Notary canonical root key. To understand more about initiating a repository, and the role of delegations, head to delegations for content trust.

$ docker trust signer add --key cert.pem jeff registry.example.com/admin/demo
+Adding signer "jeff" to registry.example.com/admin/demo...
+Enter passphrase for new repository key with ID 10b5e94:
+

Finally, we will use the delegation private key to sign a particular tag and push it up to the registry.

$ docker trust sign registry.example.com/admin/demo:1
+Signing and pushing trust data for local image registry.example.com/admin/demo:1, may overwrite remote trust data
+The push refers to repository [registry.example.com/admin/demo]
+7bff100f35cb: Pushed
+1: digest: sha256:3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e size: 528
+Signing and pushing trust metadata
+Enter passphrase for signer key with ID 8ae710e:
+Successfully signed registry.example.com/admin/demo:1
+

Alternatively, once the keys have been imported, an image can be pushed with the $ docker push command by exporting the DCT environment variable.

$ export DOCKER_CONTENT_TRUST=1
+
+$ docker push registry.example.com/admin/demo:1
+The push refers to repository [registry.example.com/admin/demo:1]
+7bff100f35cb: Pushed
+1: digest: sha256:3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e size: 528
+Signing and pushing trust metadata
+Enter passphrase for signer key with ID 8ae710e:
+Successfully signed registry.example.com/admin/demo:1
+

Remote trust data for a tag or a repository can be viewed by the $ docker trust inspect command:

$ docker trust inspect --pretty registry.example.com/admin/demo:1
+
+Signatures for registry.example.com/admin/demo:1
+
+SIGNED TAG          DIGEST                                                             SIGNERS
+1                   3d2e482b82608d153a374df3357c0291589a61cc194ec4a9ca2381073a17f58e   jeff
+
+List of signers and their keys for registry.example.com/admin/demo:1
+
+SIGNER              KEYS
+jeff                8ae710e3ba82
+
+Administrative keys for registry.example.com/admin/demo:1
+
+  Repository Key:	10b5e94c916a0977471cc08fa56c1a5679819b2005ba6a257aa78ce76d3a1e27
+  Root Key:	84ca6e4416416d78c4597e754f38517bea95ab427e5f95871f90d460573071fc
+

Remote Trust data for a tag can be removed by the $ docker trust revoke command:

$ docker trust revoke registry.example.com/admin/demo:1
+Enter passphrase for signer key with ID 8ae710e:
+Successfully deleted signature for registry.example.com/admin/demo:1
+

Client Enforcement with Docker Content Trust

Content trust is disabled by default in the Docker Client. To enable it, set the DOCKER_CONTENT_TRUST environment variable to 1. This prevents users from working with tagged images unless they contain a signature.

When DCT is enabled in the Docker client, docker CLI commands that operate on tagged images must either have content signatures or explicit content hashes. The commands that operate with DCT are push, build, create, pull, and run.

For example, with DCT enabled a docker pull someimage:latest only succeeds if someimage:latest is signed. However, an operation with an explicit content hash always succeeds as long as the hash exists:

$ docker pull registry.example.com/user/image:1
+Error: remote trust data does not exist for registry.example.com/user/image: registry.example.com does not have trust data for registry.example.com/user/image
+
+$ docker pull registry.example.com/user/image@sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
+sha256:ee7491c9c31db1ffb7673d91e9fac5d6354a89d0e97408567e09df069a1687c1: Pulling from user/image
+ff3a5c916c92: Pull complete
+a59a168caba3: Pull complete
+Digest: sha256:ee7491c9c31db1ffb7673d91e9fac5d6354a89d0e97408567e09df069a1687c1
+Status: Downloaded newer image for registry.example.com/user/image@sha256:ee7491c9c31db1ffb7673d91e9fac5d6354a89d0e97408567e09df069a1687c1
+
diff --git a/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_automation%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_automation%2Findex.html new file mode 100644 index 00000000..fbdbd7ea --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_automation%2Findex.html @@ -0,0 +1,48 @@ +

Automation with content trust


It is very common for Docker Content Trust to be built into existing automation systems. To allow tools to wrap Docker and push trusted content, there are environment variables that can be passed through to the client.

This guide follows the steps described here, so please read that page first and understand its prerequisites.

When working directly with the Notary client, it uses its own set of environment variables.

Add a delegation private key

To automate importing a delegation private key to the local Docker trust store, we need to pass a passphrase for the new key. This passphrase will be required every time that delegation signs a tag.

$ export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="mypassphrase123"
+
+$ docker trust key load delegation.key --name jeff
+Loading key from "delegation.key"...
+Successfully imported key from delegation.key
+

Add a delegation public key

If you are initializing a repository at the same time as adding a delegation public key, you will need to use the local Notary canonical root key's passphrase to create the repository's trust data. If the repository has already been initiated, you only need the repository's passphrase.

# Export the Local Root Key Passphrase if required.
+$ export DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE="rootpassphrase123"
+
+# Export the Repository Passphrase
+$ export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="repopassphrase123"
+
+# Initialise Repo and Push Delegation
+$ docker trust signer add --key delegation.crt jeff registry.example.com/admin/demo
+Adding signer "jeff" to registry.example.com/admin/demo...
+Initializing signed repository for registry.example.com/admin/demo...
+Successfully initialized "registry.example.com/admin/demo"
+Successfully added signer: registry.example.com/admin/demo
+

Sign an image

Finally when signing an image, we will need to export the passphrase of the signing key. This was created when the key was loaded into the local Docker trust store with $ docker trust key load.

$ export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="mypassphrase123"
+
+$ docker trust sign registry.example.com/admin/demo:1
+Signing and pushing trust data for local image registry.example.com/admin/demo:1, may overwrite remote trust data
+The push refers to repository [registry.example.com/admin/demo]
+428c97da766c: Layer already exists
+2: digest: sha256:1a6fd470b9ce10849be79e99529a88371dff60c60aab424c077007f6979b4812 size: 524
+Signing and pushing trust metadata
+Successfully signed registry.example.com/admin/demo:1
+

Build with content trust

You can also build with content trust. Before running the docker build command, you should set the environment variable DOCKER_CONTENT_TRUST either manually or in a scripted fashion. Consider the simple Dockerfile below.

# syntax=docker/dockerfile:1
+FROM docker/trusttest:latest
+RUN echo
+

The FROM tag is pulling a signed image. You cannot build an image that has a FROM that is not either present locally or signed. Given that content trust data exists for the tag latest, the following build should succeed:

$  docker build -t docker/trusttest:testing .
+Using default tag: latest
+latest: Pulling from docker/trusttest
+
+b3dbab3810fc: Pull complete
+a9539b34a6ab: Pull complete
+Digest: sha256:d149ab53f871
+

If content trust is enabled, building from a Dockerfile that relies on a tag without trust data causes the build command to fail:

$  docker build -t docker/trusttest:testing .
+unable to process Dockerfile: No trust data for notrust
+
diff --git a/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_delegation%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_delegation%2Findex.html new file mode 100644 index 00000000..e219395c --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_delegation%2Findex.html @@ -0,0 +1,197 @@ +

Delegations for content trust


Delegations in Docker Content Trust (DCT) allow you to control who can and cannot sign an image tag. A delegation will have a pair of private and public delegation keys. A delegation could contain multiple pairs of keys and contributors in order to a) allow multiple users to be part of a delegation, and b) to support key rotation.

The most important delegation within Docker Content Trust is targets/releases. This is seen as the canonical source of a trusted image tag, and without a contributor’s key being under this delegation, they will be unable to sign a tag.

Fortunately when using the $ docker trust commands, we will automatically initialize a repository, manage the repository keys, and add a collaborator’s key to the targets/releases delegation via docker trust signer add.

Configuring the Docker Client

By default, the $ docker trust commands expect the notary server URL to be the same as the registry URL specified in the image tag (following a similar logic to $ docker push). When using Docker Hub or DTR, the notary server URL is the same as the registry URL. However, for self-hosted environments or 3rd party registries, you will need to specify an alternative URL for the notary server. This is done with:

$ export DOCKER_CONTENT_TRUST_SERVER=https://<URL>:<PORT>
+

If you do not export this variable in self-hosted environments, you may see errors such as:

$ docker trust signer add --key cert.pem jeff registry.example.com/admin/demo
+Adding signer "jeff" to registry.example.com/admin/demo...
+<...>
+Error: trust data missing for remote repository registry.example.com/admin/demo or remote repository not found: timestamp key trust data unavailable.  Has a notary repository been initialized?
+
+$ docker trust inspect registry.example.com/admin/demo --pretty
+WARN[0000] Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely
+<...>
+

If you have enabled authentication for your notary server, or are using DTR, you will need to log in before you can push data to the notary server.

$ docker login registry.example.com/user/repo
+Username: admin
+Password:
+
+Login Succeeded
+
+$ docker trust signer add --key cert.pem jeff registry.example.com/user/repo
+Adding signer "jeff" to registry.example.com/user/repo...
+Initializing signed repository for registry.example.com/user/repo...
+Successfully initialized "registry.example.com/user/repo"
+Successfully added signer: jeff to registry.example.com/user/repo
+

If you do not log in, you will see:

$ docker trust signer add --key cert.pem jeff registry.example.com/user/repo
+Adding signer "jeff" to registry.example.com/user/repo...
+Initializing signed repository for registry.example.com/user/repo...
+you are not authorized to perform this operation: server returned 401.
+
+Failed to add signer to: registry.example.com/user/repo
+

Configuring the Notary Client

Some of the more advanced features of DCT require the Notary CLI. To install and configure the Notary CLI:

  1. Download the client and ensure that it is available on your path.

  2. Create a configuration file at ~/.notary/config.json with the following content:

{
+  "trust_dir" : "~/.docker/trust",
+  "remote_server": {
+    "url": "https://registry.example.com",
+    "root_ca": "../.docker/ca.pem"
+  }
+}
+

The newly created configuration file contains information about the location of your local Docker trust data and the notary server URL.

For more detailed information about how to use notary outside of the Docker Content Trust use cases, refer to the Notary CLI documentation here

Creating Delegation Keys

A prerequisite to adding your first contributor is a pair of delegation keys. These keys can either be generated locally using $ docker trust, or generated by a certificate authority.

Using Docker Trust to Generate Keys

Docker trust has a built-in generator for a delegation key pair, $ docker trust key generate <name>. Running this command will automatically load the delegation private key into the local Docker trust store.

$ docker trust key generate jeff
+
+Generating key for jeff...
+Enter passphrase for new jeff key with ID 9deed25: 
+Repeat passphrase for new jeff key with ID 9deed25: 
+Successfully generated and loaded private key. Corresponding public key available: /home/ubuntu/Documents/mytrustdir/jeff.pub
+

Manually Generating Keys

If you need to manually generate a private key (either RSA or ECDSA) and a x509 certificate containing the public key, you can use local tools like openssl or cfssl along with a local or company-wide Certificate Authority.

Here is an example of how to generate a 2048-bit RSA key (all RSA keys must be at least 2048 bits):

$ openssl genrsa -out delegation.key 2048
+
+Generating RSA private key, 2048 bit long modulus
+....................................................+++
+............+++
+e is 65537 (0x10001)
+

They should keep delegation.key private because it is used to sign tags.

Then they need to generate an x509 certificate containing the public key, which is what you need from them. Here is the command to generate a CSR (certificate signing request):

$ openssl req -new -sha256 -key delegation.key -out delegation.csr
+

Then they can send it to whichever CA you trust to sign certificates, or they can self-sign the certificate (in this example, creating a certificate that is valid for 1 year):

$ openssl x509 -req -sha256 -days 365 -in delegation.csr -signkey delegation.key -out delegation.crt
+

Then they need to give you delegation.crt, whether it is self-signed or signed by a CA.

Finally you will need to add the private key into your local Docker trust store.

$ docker trust key load delegation.key --name jeff
+
+Loading key from "delegation.key"...
+Enter passphrase for new jeff key with ID 8ae710e: 
+Repeat passphrase for new jeff key with ID 8ae710e: 
+Successfully imported key from delegation.key
+

Viewing local Delegation keys

To list the keys that have been imported in to the local Docker trust store we can use the Notary CLI.

$ notary key list
+
+ROLE       GUN                          KEY ID                                                              LOCATION
+----       ---                          ------                                                              --------
+root                                    f6c6a4b00fefd8751f86194c7d87a3bede444540eb3378c4a11ce10852ab1f96    /home/ubuntu/.docker/trust/private
+jeff                                    9deed251daa1aa6f9d5f9b752847647cf8d705da0763aa5467650d0987ed5306    /home/ubuntu/.docker/trust/private
+

Managing Delegations in a Notary Server

When the first Delegation is added to the Notary Server using $ docker trust, we automatically initiate trust data for the repository. This includes creating the notary target and snapshots keys, and rotating the snapshot key to be managed by the notary server. More information on these keys can be found here

When initiating a repository, you will need the key and the passphrase of a local Notary Canonical Root Key. If you have not initiated a repository before, and therefore don’t have a Notary root key, $ docker trust will create one for you.

Be sure to protect and back up your Notary Canonical Root Key

Initiating the Repository

To upload the first key to a delegation, at the same time initiating a repository, you can use the $ docker trust signer add command. This will add the contributor’s public key to the targets/releases delegation, and create a second targets/<name> delegation.

For DCT, the name of the second delegation (in the example below, jeff) is there to help you keep track of the owner of the keys. In more advanced use cases of Notary, additional delegations are used for hierarchy.

$ docker trust signer add --key cert.pem jeff registry.example.com/admin/demo
+
+Adding signer "jeff" to registry.example.com/admin/demo...
+Initializing signed repository for registry.example.com/admin/demo...
+Enter passphrase for root key with ID f6c6a4b: 
+Enter passphrase for new repository key with ID b0014f8: 
+Repeat passphrase for new repository key with ID b0014f8: 
+Successfully initialized "registry.example.com/admin/demo"
+Successfully added signer: jeff to registry.example.com/admin/demo
+

You can see which keys have been pushed to the Notary server for each repository with the $ docker trust inspect command.

$ docker trust inspect --pretty registry.example.com/admin/demo
+
+No signatures for registry.example.com/admin/demo
+
+
+List of signers and their keys for registry.example.com/admin/demo
+
+SIGNER              KEYS
+jeff                1091060d7bfd
+
+Administrative keys for registry.example.com/admin/demo
+
+  Repository Key:	b0014f8e4863df2d028095b74efcb05d872c3591de0af06652944e310d96598d
+  Root Key:	64d147e59e44870311dd2d80b9f7840039115ef3dfa5008127d769a5f657a5d7
+

You could also use the Notary CLI to list delegations and keys. Here you can clearly see the keys were attached to targets/releases and targets/jeff.

$ notary delegation list registry.example.com/admin/demo
+
+ROLE                PATHS             KEY IDS                                                             THRESHOLD
+----                -----             -------                                                             ---------
+targets/jeff        "" <all paths>    1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1    1
+                                          
+targets/releases    "" <all paths>    1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1    1 
+

Adding Additional Signers

Docker Trust allows you to configure multiple delegations per repository, allowing you to manage the lifecycle of delegations. When adding additional delegations with $ docker trust, the collaborator's key is once again added to the targets/releases role.

Note you will need the passphrase for the repository key; this would have been configured when you first initiated the repository.

$ docker trust signer add --key ben.pub ben registry.example.com/admin/demo
+
+Adding signer "ben" to registry.example.com/admin/demo...
+Enter passphrase for repository key with ID b0014f8: 
+Successfully added signer: ben to registry.example.com/admin/demo
+

Check to confirm that there are now two delegations (signers).

$ docker trust inspect --pretty registry.example.com/admin/demo
+
+No signatures for registry.example.com/admin/demo
+
+List of signers and their keys for registry.example.com/admin/demo
+
+SIGNER              KEYS
+ben                 afa404703b25
+jeff                1091060d7bfd
+
+Administrative keys for registry.example.com/admin/demo
+
+  Repository Key:	b0014f8e4863df2d028095b74efcb05d872c3591de0af06652944e310d96598d
+  Root Key:	64d147e59e44870311dd2d80b9f7840039115ef3dfa5008127d769a5f657a5d7
+

Adding Keys to an Existing Delegation

To support things like key rotation and expiring or retiring keys, you can publish multiple contributor keys per delegation. The only prerequisite here is to make sure you use the same delegation name, in this case jeff. Docker trust will automatically handle adding this new key to targets/releases.

Note you will need the passphrase for the repository key; this would have been configured when you first initiated the repository.

$ docker trust signer add --key cert2.pem jeff registry.example.com/admin/demo
+
+Adding signer "jeff" to registry.example.com/admin/demo...
+Enter passphrase for repository key with ID b0014f8: 
+Successfully added signer: jeff to registry.example.com/admin/demo
+

Check to confirm that the delegation (signer) now contains multiple key IDs.

$ docker trust inspect --pretty registry.example.com/admin/demo
+
+No signatures for registry.example.com/admin/demo
+
+
+List of signers and their keys for registry.example.com/admin/demo
+
+SIGNER              KEYS
+jeff                1091060d7bfd, 5570b88df073
+
+Administrative keys for registry.example.com/admin/demo
+
+  Repository Key:	b0014f8e4863df2d028095b74efcb05d872c3591de0af06652944e310d96598d
+  Root Key:	64d147e59e44870311dd2d80b9f7840039115ef3dfa5008127d769a5f657a5d7
+

Removing a Delegation

If you need to remove a delegation, including the contributor keys that are attached to the targets/releases role, you can use the $ docker trust signer remove command.

Note that tags signed by the removed delegation will need to be re-signed by an active delegation.

$ docker trust signer remove registry.example.com/admin/demo
+Removing signer "ben" from registry.example.com/admin/demo...
+Enter passphrase for repository key with ID b0014f8: 
+Successfully removed ben from registry.example.com/admin/demo
+

Troubleshooting

1) If you see an error that there are no usable keys in targets/releases, you will need to add additional delegations using docker trust signer add before resigning images.

WARN[0000] role targets/releases has fewer keys than its threshold of 1; it will not be usable until keys are added to it
+

2) If you have added additional delegations already and are seeing an error message that there are no valid signatures in targets/releases, you will need to resign the targets/releases delegation file with the Notary CLI.

WARN[0000] Error getting targets/releases: valid signatures did not meet threshold for targets/releases 
+

Resigning the delegation file is done with the $ notary witness command

$ notary witness registry.example.com/admin/demo targets/releases --publish
+

More information on the $ notary witness command can be found here

Removing a Contributor’s Key from a Delegation

As part of rotating keys for a delegation, you may want to remove an individual key but retain the delegation. This can be done with the Notary CLI.

Remember you will have to remove the key from both the targets/releases role and the role specific to that signer targets/<name>.

1) We will need to grab the Key ID from the Notary Server

$ notary delegation list registry.example.com/admin/demo
+
+ROLE                PATHS             KEY IDS                                                             THRESHOLD
+----                -----             -------                                                             ---------
+targets/jeff        "" <all paths>    8fb597cbaf196f0781628b2f52bff6b3912e4e8075720378fda60d17232bbcf9    1
+                                      1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1    
+targets/releases    "" <all paths>    8fb597cbaf196f0781628b2f52bff6b3912e4e8075720378fda60d17232bbcf9    1
+                                      1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1    
+

2) Remove from the targets/releases delegation

$ notary delegation remove registry.example.com/admin/demo targets/releases 1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1 --publish
+
+Auto-publishing changes to registry.example.com/admin/demo
+Enter username: admin
+Enter password: 
+Enter passphrase for targets key with ID b0014f8: 
+Successfully published changes for repository registry.example.com/admin/demo
+

3) Remove from the targets/<name> delegation

$ notary delegation remove registry.example.com/admin/demo targets/jeff 1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1 --publish
+
+Removal of delegation role targets/jeff with keys [5570b88df0736c468493247a07e235e35cf3641270c944d0e9e8899922fc6f99], to repository "registry.example.com/admin/demo" staged for next publish.
+
+Auto-publishing changes to registry.example.com/admin/demo
+Enter username: admin    
+Enter password: 
+Enter passphrase for targets key with ID b0014f8: 
+Successfully published changes for repository registry.example.com/admin/demo
+

4) Check the remaining delegation list

$ notary delegation list registry.example.com/admin/demo
+
+ROLE                PATHS             KEY IDS                                                             THRESHOLD
+----                -----             -------                                                             ---------
+targets/jeff        "" <all paths>    8fb597cbaf196f0781628b2f52bff6b3912e4e8075720378fda60d17232bbcf9    1    
+targets/releases    "" <all paths>    8fb597cbaf196f0781628b2f52bff6b3912e4e8075720378fda60d17232bbcf9    1    
+

Removing a local Delegation Private Key

As part of rotating delegation keys, you may need to remove a local delegation key from the local Docker trust store. This is done with the Notary CLI, using the $ notary key remove command.

1) We will need to get the Key ID from the local Docker Trust store

$ notary key list
+
+ROLE       GUN                          KEY ID                                                              LOCATION
+----       ---                          ------                                                              --------
+root                                    f6c6a4b00fefd8751f86194c7d87a3bede444540eb3378c4a11ce10852ab1f96    /home/ubuntu/.docker/trust/private
+admin                                   8fb597cbaf196f0781628b2f52bff6b3912e4e8075720378fda60d17232bbcf9    /home/ubuntu/.docker/trust/private
+jeff                                    1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1    /home/ubuntu/.docker/trust/private
+targets    ...example.com/admin/demo    c819f2eda8fba2810ec6a7f95f051c90276c87fddfc3039058856fad061c009d    /home/ubuntu/.docker/trust/private
+

2) Remove the key from the local Docker Trust store

$ notary key remove 1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1
+
+Are you sure you want to remove 1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1 (role jeff) from /home/ubuntu/.docker/trust/private?  (yes/no)  y
+
+Deleted 1091060d7bfd938dfa5be703fa057974f9322a4faef6f580334f3d6df44c02d1 (role jeff) from /home/ubuntu/.docker/trust/private.
+

Removing all trust data from a Repository

You can remove all trust data from a repository, including the repository, targets, snapshot, and all delegation keys, using the Notary CLI.

This is often required by a container registry before a particular repository can be deleted.

$ notary delete registry.example.com/admin/demo --remote
+
+Deleting trust data for repository registry.example.com/admin/demo
+Enter username: admin
+Enter password: 
+Successfully deleted local and remote trust data for repository registry.example.com/admin/demo
+
+$ docker trust inspect --pretty registry.example.com/admin/demo
+
+No signatures or cannot access registry.example.com/admin/demo
+
diff --git a/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_key_mng%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_key_mng%2Findex.html new file mode 100644 index 00000000..e018b171 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_key_mng%2Findex.html @@ -0,0 +1,12 @@ +

Manage keys for content trust


Trust for an image tag is managed through the use of keys. Docker’s content trust makes use of five different types of keys:

Key Description
root key Root of content trust for an image tag. When content trust is enabled, you create the root key once. Also known as the offline key, because it should be kept offline.
targets This key allows you to sign image tags, to manage delegations including delegated keys or permitted delegation paths. Also known as the repository key, since this key determines what tags can be signed into an image repository.
snapshot This key signs the current collection of image tags, preventing mix and match attacks.
timestamp This key allows Docker image repositories to have freshness security guarantees without requiring periodic content refreshes on the client’s side.
delegation Delegation keys are optional tagging keys and allow you to delegate signing image tags to other publishers without having to share your targets key.

When doing a docker push with Content Trust enabled for the first time, the root, targets, snapshot, and timestamp keys are generated automatically for the image repository:
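
For example, the first signed push produces output along these lines (illustrative, adapted from the sandbox walkthrough later in this document; key IDs, digests, and the repository name will differ):

$ export DOCKER_CONTENT_TRUST=1
$ docker push registry.example.com/admin/demo:1
...
Signing and pushing trust metadata
You are about to create a new root signing key passphrase. This passphrase
will be used to protect the most sensitive key in your signing system.
Enter passphrase for new root key with ID 27ec255:
Repeat passphrase for new root key with ID 27ec255:
Enter passphrase for new repository key with ID 58233f9 (registry.example.com/admin/demo):
Repeat passphrase for new repository key with ID 58233f9 (registry.example.com/admin/demo):
Finished initializing "registry.example.com/admin/demo"
Successfully signed registry.example.com/admin/demo:1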

Delegation keys are optional, and not generated as part of the normal docker workflow. They need to be manually generated and added to the repository.

Note: Prior to Docker Engine 1.11, the snapshot key was also generated and stored locally on the client. For repositories created with newer versions of Docker, the snapshot key is managed by the Notary server; use the Notary CLI if you want to manage your snapshot key locally again.

Choose a passphrase

The passphrases you choose for both the root key and your repository key should be randomly generated and stored in a password manager. Having the repository key allows users to sign image tags on a repository. Passphrases are used to encrypt your keys at rest and ensure that a lost laptop or an unintended backup doesn’t put the private key material at risk.

Back up your keys

All the Docker trust keys are stored encrypted using the passphrase you provide on creation. Even so, you should still take care of the location where you back them up. Good practice is to create two encrypted USB keys.

It is very important that you back up your keys to a safe, secure location. Loss of the repository key is recoverable; loss of the root key is not.

The Docker client stores the keys in the ~/.docker/trust/private directory. Before backing them up, you should tar them into an archive:

$ umask 077; tar -zcvf private_keys_backup.tar.gz ~/.docker/trust/private; umask 022
+

Hardware storage and signing

Docker Content Trust can store and sign with root keys from a Yubikey 4. The Yubikey is prioritized over keys stored in the filesystem. When you initialize a new repository with content trust, Docker Engine looks for a root key locally. If a key is not found and the Yubikey 4 exists, Docker Engine creates a root key in the Yubikey 4. Consult the Notary documentation for more details.

Prior to Docker Engine 1.11, this feature was only in the experimental branch.

Lost keys

If a publisher loses a key, they lose the ability to sign trusted content for their repositories. If you lose a key, send an email to Docker Hub Support to reset the repository state.

This loss also requires manual intervention from every consumer that pulled the tagged image prior to the loss. Image consumers would get an error for content that they already downloaded:

Warning: potential malicious behavior - trust data has insufficient signatures for remote repository docker.io/my/image: valid signatures did not meet threshold
+

To correct this, they need to download a new image tag that is signed with the new key.

diff --git a/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_sandbox%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_sandbox%2Findex.html new file mode 100644 index 00000000..fa22410c --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Ftrust%2Ftrust_sandbox%2Findex.html @@ -0,0 +1,123 @@ +

Play in a content trust sandbox


This page explains how to set up and use a sandbox for experimenting with trust. The sandbox allows you to configure and try trust operations locally without impacting your production images.

Before working through this sandbox, you should have read through the trust overview.

Prerequisites

These instructions assume you are running in Linux or macOS. You can run this sandbox on a local machine or on a virtual machine. You need to have privileges to run docker commands on your local machine or in the VM.

This sandbox requires you to install two Docker tools: Docker Engine >= 1.10.0 and Docker Compose >= 1.6.0. To install the Docker Engine, choose from the list of supported platforms. To install Docker Compose, see the detailed instructions here.

What is in the sandbox?

If you are just using trust out-of-the-box you only need your Docker Engine client and access to the Docker Hub. The sandbox mimics a production trust environment, and sets up these additional components.

Container Description
trustsandbox A container with the latest version of Docker Engine and with some preconfigured certificates. This is your sandbox where you can use the docker client to test trust operations.
Registry server A local registry service.
Notary server The service that does all the heavy-lifting of managing trust

This means you run your own content trust (Notary) server and registry. If you work exclusively with the Docker Hub, you would not need these components. They are built into the Docker Hub for you. For the sandbox, however, you build your own entire, mock production environment.

Within the trustsandbox container, you interact with your local registry rather than the Docker Hub. This means your everyday image repositories are not used. They are protected while you play.

When you play in the sandbox, you also create root and repository keys. The sandbox is configured to store all the keys and files inside the trustsandbox container. Since the keys you create in the sandbox are for play only, destroying the container destroys them as well.

By using a docker-in-docker image for the trustsandbox container, you also don’t pollute your real Docker daemon cache with any images you push and pull. The images are stored in an anonymous volume attached to this container, and can be destroyed after you destroy the container.

Build the sandbox

In this section, you use Docker Compose to specify how to set up and link together the trustsandbox container, the Notary server, and the Registry server.

  1. Create a new trustsandbox directory and change into it.

     $ mkdir trustsandbox
    + $ cd trustsandbox
    +
  2. Create a file called docker-compose.yml with your favorite editor. For example, using vim:

     $ touch docker-compose.yml
    + $ vim docker-compose.yml
    +
  3. Add the following to the new file.

     version: "2"
    + services:
    +   notaryserver:
    +     image: dockersecurity/notary_autobuilds:server-v0.5.1
    +     volumes:
    +       - notarycerts:/var/lib/notary/fixtures
    +     networks:
    +       - sandbox
    +     environment:
    +       - NOTARY_SERVER_STORAGE_TYPE=memory
    +       - NOTARY_SERVER_TRUST_SERVICE_TYPE=local
    +   sandboxregistry:
    +     image: registry:2.4.1
    +     networks:
    +       - sandbox
    +     container_name: sandboxregistry
    +   trustsandbox:
    +     image: docker:dind
    +     networks:
    +       - sandbox
    +     volumes:
    +       - notarycerts:/notarycerts
    +     privileged: true
    +     container_name: trustsandbox
    +     entrypoint: ""
    +     command: |-
    +         sh -c '
    +             cp /notarycerts/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt &&
    +             update-ca-certificates &&
    +             dockerd-entrypoint.sh --insecure-registry sandboxregistry:5000'
    + volumes:
    +   notarycerts:
    +     external: false
    + networks:
    +   sandbox:
    +     external: false
    +
  4. Save and close the file.

  5. Run the containers on your local system.

     $ docker-compose up -d
    +

    The first time you run this, the docker-in-docker, Notary server, and registry images are downloaded from Docker Hub.

Play in the sandbox

Now that everything is set up, you can go into your trustsandbox container and start testing Docker content trust. From your host machine, obtain a shell in the trustsandbox container.

$ docker container exec -it trustsandbox sh
+/ #
+

Test some trust operations

Now, pull some images from within the trustsandbox container.

  1. Download a docker image to test with.

     / # docker pull docker/trusttest
    + docker pull docker/trusttest
    + Using default tag: latest
    + latest: Pulling from docker/trusttest
    +
    + b3dbab3810fc: Pull complete
    + a9539b34a6ab: Pull complete
    + Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
    + Status: Downloaded newer image for docker/trusttest:latest
    +
  2. Tag it to be pushed to our sandbox registry:

     / # docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest
    +
  3. Enable content trust.

     / # export DOCKER_CONTENT_TRUST=1
    +
  4. Identify the trust server.

     / # export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443
    +

    You need this step only because the sandbox uses its own Notary server. If you were using the notary server built into Docker Hub, this step would not be necessary.

  5. Pull the test image.

     / # docker pull sandboxregistry:5000/test/trusttest
    + Using default tag: latest
    + Error: remote trust data does not exist for sandboxregistry:5000/test/trusttest: notaryserver:4443 does not have trust data for sandboxregistry:5000/test/trusttest
    +

    You see an error, because this content doesn’t exist on the notaryserver yet.

  6. Push and sign the trusted image.

     / # docker push sandboxregistry:5000/test/trusttest:latest
    + The push refers to a repository [sandboxregistry:5000/test/trusttest]
    + 5f70bf18a086: Pushed
    + c22f7bc058a9: Pushed
    + latest: digest: sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 size: 734
    + Signing and pushing trust metadata
    + You are about to create a new root signing key passphrase. This passphrase
    + will be used to protect the most sensitive key in your signing system. Please
    + choose a long, complex passphrase and be careful to keep the password and the
    + key file itself secure and backed up. It is highly recommended that you use a
    + password manager to generate the passphrase and keep it safe. There will be no
    + way to recover this key. You can find the key in your config directory.
    + Enter passphrase for new root key with ID 27ec255:
    + Repeat passphrase for new root key with ID 27ec255:
    + Enter passphrase for new repository key with ID 58233f9 (sandboxregistry:5000/test/trusttest):
    + Repeat passphrase for new repository key with ID 58233f9 (sandboxregistry:5000/test/trusttest):
    + Finished initializing "sandboxregistry:5000/test/trusttest"
    + Successfully signed "sandboxregistry:5000/test/trusttest":latest
    +

    Because you are pushing this repository for the first time, Docker creates new root and repository keys and asks you for passphrases with which to encrypt them. If you push again after this, it only asks you for the repository passphrase so it can decrypt the key and sign again. (The note after these steps shows where the keys are stored inside the container.)

  7. Try pulling the image you just pushed:

     / # docker pull sandboxregistry:5000/test/trusttest
    + Using default tag: latest
    + Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
    + sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926: Pulling from test/trusttest
    + Digest: sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
    + Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
    + Tagging sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926 as sandboxregistry:5000/test/trusttest:latest
    +
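
As the push output notes, the root and repository keys are created in the Docker client's configuration directory. Inside the trustsandbox container that is root's home directory, so a quick way to look at them is something like the following (the exact layout under the trust directory can vary between Docker versions):

/ # ls -R /root/.docker/trust/private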

Test with malicious images

What happens when data is corrupted and you try to pull it while trust is enabled? In this section, you go into the sandboxregistry and tamper with some data. Then, you try to pull it.

  1. Leave the trustsandbox shell and container running.

  2. Open a new interactive terminal from your host, and obtain a shell into the sandboxregistry container.

    $ docker container exec -it sandboxregistry bash
    +root@65084fc6f047:/#
    +
  3. List the layers for the test/trusttest image you pushed:

    root@65084fc6f047:/# ls -l /var/lib/registry/docker/registry/v2/repositories/test/trusttest/_layers/sha256
    +total 12
    +drwxr-xr-x 2 root root 4096 Jun 10 17:26 a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
    +drwxr-xr-x 2 root root 4096 Jun 10 17:26 aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
    +drwxr-xr-x 2 root root 4096 Jun 10 17:26 cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd
    +
  4. Change into the registry storage for one of those layers (this is in a different directory):

    root@65084fc6f047:/# cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
    +
  5. Add malicious data to one of the trusttest layers:

    root@65084fc6f047:/# echo "Malicious data" > data
    +
  6. Go back to your trustsandbox terminal.

  7. List the trusttest image.

    / # docker image ls | grep trusttest
    +REPOSITORY                            TAG                 IMAGE ID            CREATED             SIZE
    +docker/trusttest                      latest              cc7629d1331a        11 months ago       5.025 MB
    +sandboxregistry:5000/test/trusttest   latest              cc7629d1331a        11 months ago       5.025 MB
    +sandboxregistry:5000/test/trusttest   <none>              cc7629d1331a        11 months ago       5.025 MB
    +
  8. Remove the trusttest:latest image from our local cache.

    / # docker image rm -f cc7629d1331a
    +Untagged: docker/trusttest:latest
    +Untagged: sandboxregistry:5000/test/trusttest:latest
    +Untagged: sandboxregistry:5000/test/trusttest@sha256:ebf59c538accdf160ef435f1a19938ab8c0d6bd96aef8d4ddd1b379edf15a926
    +Deleted: sha256:cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd
    +Deleted: sha256:2a1f6535dc6816ffadcdbe20590045e6cbf048d63fd4cc753a684c9bc01abeea
    +Deleted: sha256:c22f7bc058a9a8ffeb32989b5d3338787e73855bf224af7aa162823da015d44c
    +

    Docker does not re-download images that it already has cached, but we want Docker to attempt to download the tampered image from the registry and reject it because it is invalid.

  9. Pull the image again. This downloads the image from the registry, because we don’t have it cached.

    / # docker pull sandboxregistry:5000/test/trusttest
    +Using default tag: latest
    +Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:35d5bc26fd358da8320c137784fe590d8fcf9417263ef261653e8e1c7f15672e
    +sha256:35d5bc26fd358da8320c137784fe590d8fcf9417263ef261653e8e1c7f15672e: Pulling from test/trusttest
    +
    +aac0c133338d: Retrying in 5 seconds
    +a3ed95caeb02: Download complete
    +error pulling image configuration: unexpected EOF
    +

    The pull did not complete because the trust system couldn’t verify the image.

More play in the sandbox

Now that you have a full Docker content trust sandbox on your local system, feel free to play with it and see how it behaves. If you find any security issues with Docker, feel free to send us an email at security@docker.com.

Clean up your sandbox

When you are done, and want to clean up all the services you’ve started and any anonymous volumes that have been created, just run the following command in the directory where you’ve created your Docker Compose file:

    $ docker-compose down -v
+
+
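
The docker-compose down -v command removes the containers, the sandbox network, and the anonymous volumes, but the images pulled to your host cache remain. If you also want to drop those, one option is to pass the --rmi flag when you take the stack down (--rmi is part of the standard docker-compose down command):

$ docker-compose down -v --rmi all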

trust, security, root, keys, repository, sandbox

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/security/trust/trust_sandbox/ +

+
diff --git a/devdocs/docker/engine%2Fsecurity%2Fuserns-remap%2Findex.html b/devdocs/docker/engine%2Fsecurity%2Fuserns-remap%2Findex.html new file mode 100644 index 00000000..e9b3ea56 --- /dev/null +++ b/devdocs/docker/engine%2Fsecurity%2Fuserns-remap%2Findex.html @@ -0,0 +1,46 @@ +

Isolate containers with a user namespace

+ +

Linux namespaces provide isolation for running processes, limiting their access to system resources without the running process being aware of the limitations. For more information on Linux namespaces, see Linux namespaces.

The best way to prevent privilege-escalation attacks from within a container is to configure your container’s applications to run as unprivileged users. For containers whose processes must run as the root user within the container, you can re-map this user to a less-privileged user on the Docker host. The mapped user is assigned a range of UIDs which function within the namespace as normal UIDs from 0 to 65536, but have no privileges on the host machine itself.

About remapping and subordinate user and group IDs

The remapping itself is handled by two files: /etc/subuid and /etc/subgid. Each file works the same, but one is concerned with the user ID range, and the other with the group ID range. Consider the following entry in /etc/subuid:

testuser:231072:65536
+

This means that testuser is assigned a subordinate user ID range of 231072 and the next 65536 integers in sequence. UID 231072 is mapped within the namespace (within the container, in this case) as UID 0 (root). UID 231073 is mapped as UID 1, and so forth. If a process attempts to escalate privilege outside of the namespace, the process is running as an unprivileged high-number UID on the host, which does not even map to a real user. This means the process has no privileges on the host system at all.
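
To make the mapping concrete, here is a small shell sketch of the arithmetic for the testuser:231072:65536 entry above (the container UID of 1000 is just an illustrative value):

# host UID = start of the subordinate range + UID inside the container
container_uid=1000
subuid_start=231072
echo $(( subuid_start + container_uid ))    # prints 232072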

Multiple ranges

It is possible to assign multiple subordinate ranges for a given user or group by adding multiple non-overlapping mappings for the same user or group in the /etc/subuid or /etc/subgid file. In this case, Docker uses only the first five mappings, in accordance with the kernel’s limitation of only five entries in /proc/self/uid_map and /proc/self/gid_map.

When you configure Docker to use the userns-remap feature, you can optionally specify an existing user and/or group, or you can specify default. If you specify default, a user and group dockremap is created and used for this purpose.

Warning: Some distributions, such as RHEL and CentOS 7.3, do not automatically add the new group to the /etc/subuid and /etc/subgid files. You are responsible for editing these files and assigning non-overlapping ranges, in this case. This step is covered in Prerequisites.

It is very important that the ranges do not overlap, so that a process cannot gain access in a different namespace. On most Linux distributions, system utilities manage the ranges for you when you add or remove users.

This re-mapping is transparent to the container, but introduces some configuration complexity in situations where the container needs access to resources on the Docker host, such as bind mounts into areas of the filesystem that the system user cannot write to. From a security standpoint, it is best to avoid these situations.
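
For example, if a remapped container does need to write to a bind-mounted host directory, you can pre-assign ownership of that directory to the remapped root UID and GID. The path below is only an illustration, and 231072 assumes the subordinate range shown earlier:

$ sudo mkdir -p /srv/app-data
$ sudo chown -R 231072:231072 /srv/app-data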

Prerequisites

  1. The subordinate UID and GID ranges must be associated with an existing user, even though the association is an implementation detail. The user owns the namespaced storage directories under /var/lib/docker/. If you don’t want to use an existing user, Docker can create one for you and use that. If you want to use an existing username or user ID, it must already exist. Typically, this means that the relevant entries need to be in /etc/passwd and /etc/group, but if you are using a different authentication back-end, this requirement may translate differently.

    To verify this, use the id command:

    $ id testuser
    +
    +uid=1001(testuser) gid=1001(testuser) groups=1001(testuser)
    +
  2. On the host, the namespace remapping is handled by two files: /etc/subuid and /etc/subgid. These files are typically managed automatically when you add or remove users or groups, but on a few distributions such as RHEL and CentOS 7.3, you may need to manage them manually.

    Each file contains three fields: the username or ID of the user, followed by a beginning UID or GID (which is treated as UID or GID 0 within the namespace) and a maximum number of UIDs or GIDs available to the user. For instance, given the following entry:

    testuser:231072:65536
    +

    This means that user-namespaced processes started by testuser are owned by host UID 231072 (which looks like UID 0 inside the namespace) through 296607 (231072 + 65536 - 1). These ranges should not overlap, to ensure that namespaced processes cannot access each other’s namespaces.

    After adding your user, check /etc/subuid and /etc/subgid to see if your user has an entry in each. If not, you need to add it, being careful to avoid overlap; a minimal sketch follows this list.

    If you want to use the dockremap user automatically created by Docker, check for the dockremap entry in these files after configuring and restarting Docker.

  3. If there are any locations on the Docker host where the unprivileged user needs to write, adjust the permissions of those locations accordingly. This is also true if you want to use the dockremap user automatically created by Docker, but you can’t modify the permissions until after configuring and restarting Docker.

  4. Enabling userns-remap effectively masks existing image and container layers, as well as other Docker objects within /var/lib/docker/. This is because Docker needs to adjust the ownership of these resources and actually stores them in a subdirectory within /var/lib/docker/. It is best to enable this feature on a new Docker installation rather than an existing one.

    Along the same lines, if you disable userns-remap you can’t access any of the resources created while it was enabled.

  5. Check the limitations on user namespaces to be sure your use case is possible.
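
If your user is missing from /etc/subuid or /etc/subgid (step 2 above), a minimal way to add it is to append an entry to each file as root. The 231072:65536 range is the example used throughout this page; pick a range that does not overlap anything already present on your system:

$ echo "testuser:231072:65536" | sudo tee -a /etc/subuid
$ echo "testuser:231072:65536" | sudo tee -a /etc/subgid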

Enable userns-remap on the daemon

You can start dockerd with the --userns-remap flag or follow this procedure to configure the daemon using the daemon.json configuration file. The daemon.json method is recommended. If you use the flag, use the following command as a model:

$ dockerd --userns-remap="testuser:testuser"
+
  1. Edit /etc/docker/daemon.json. Assuming the file was previously empty, the following entry enables userns-remap using a user and group called testuser. You can address the user and group by ID or name. You only need to specify the group name or ID if it is different from the user name or ID. If you provide both the user and group name or ID, separate them by a colon (:) character. The following formats all work for the value, assuming the UID and GID of testuser are 1001:

    • testuser
    • testuser:testuser
    • 1001
    • 1001:1001
    • testuser:1001
    • 1001:testuser
    {
    +  "userns-remap": "testuser"
    +}
    +

    Note: To use the dockremap user and have Docker create it for you, set the value to default rather than testuser.

    Save the file and restart Docker.

  2. If you are using the dockremap user, verify that Docker created it using the id command.

    $ id dockremap
    +
    +uid=112(dockremap) gid=116(dockremap) groups=116(dockremap)
    +

    Verify that the entry has been added to /etc/subuid and /etc/subgid:

    $ grep dockremap /etc/subuid
    +
    +dockremap:231072:65536
    +
    +$ grep dockremap /etc/subgid
    +
    +dockremap:231072:65536
    +

    If these entries are not present, edit the files as the root user and assign a starting UID and GID that is the highest-assigned one plus the offset (in this case, 65536). Be careful not to allow any overlap in the ranges.

  3. Verify that previous images are not available using the docker image ls command. The output should be empty.

  4. Start a container from the hello-world image.

    $ docker run hello-world
    +
  5. Verify that a namespaced directory exists within /var/lib/docker/ named with the UID and GID of the namespaced user, owned by that UID and GID, and not group-or-world-readable. Some of the subdirectories are still owned by root and have different permissions.

    $ sudo ls -ld /var/lib/docker/231072.231072/
    +
    +drwx------ 11 231072 231072 11 Jun 21 21:19 /var/lib/docker/231072.231072/
    +
    +$ sudo ls -l /var/lib/docker/231072.231072/
    +
    +total 14
    +drwx------ 5 231072 231072 5 Jun 21 21:19 aufs
    +drwx------ 3 231072 231072 3 Jun 21 21:21 containers
    +drwx------ 3 root   root   3 Jun 21 21:19 image
    +drwxr-x--- 3 root   root   3 Jun 21 21:19 network
    +drwx------ 4 root   root   4 Jun 21 21:19 plugins
    +drwx------ 2 root   root   2 Jun 21 21:19 swarm
    +drwx------ 2 231072 231072 2 Jun 21 21:21 tmp
    +drwx------ 2 root   root   2 Jun 21 21:19 trust
    +drwx------ 2 231072 231072 3 Jun 21 21:19 volumes
    +

    Your directory listing may have some differences, especially if you use a different container storage driver than aufs.

    The directories which are owned by the remapped user are used instead of the same directories directly beneath /var/lib/docker/, and the unused versions (such as /var/lib/docker/tmp/ in the example here) can be removed. Docker does not use them while userns-remap is enabled.

Disable namespace remapping for a container

If you enable user namespaces on the daemon, all containers are started with user namespaces enabled by default. In some situations, such as privileged containers, you may need to disable user namespaces for a specific container. See the user namespace known limitations section below for details.

To disable user namespaces for a specific container, add the --userns=host flag to the docker container create, docker container run, or docker container exec command.
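
For example, assuming the daemon is running with userns-remap enabled, the following starts a one-off container with remapping turned off for that container only (hello-world is just a convenient test image):

$ docker container run --rm --userns=host hello-world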

There is a side effect when using this flag: user remapping is not enabled for that container, but, because the read-only (image) layers are shared between containers, ownership of the container's filesystem is still remapped.

What this means is that the whole container filesystem belongs to the user specified in the --userns-remap daemon config (231072 in the example above). This can lead to unexpected behavior of programs inside the container, for instance sudo (which checks that its binaries belong to user 0) or binaries with a setuid flag.

User namespace known limitations

Some standard Docker features are incompatible with running a Docker daemon with user namespaces enabled.

User namespaces are an advanced feature and require coordination with other capabilities. For example, if volumes are mounted from the host, file ownership must be pre-arranged if the container needs read or write access to the volume contents.

While the root user inside a user-namespaced container process has many of the expected privileges of the superuser within the container, the Linux kernel imposes restrictions based on internal knowledge that this is a user-namespaced process. One notable restriction is the inability to use the mknod command. Permission is denied for device creation within the container when run by the root user.

+

security, namespaces

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/security/userns-remap/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fadmin_guide%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fadmin_guide%2Findex.html new file mode 100644 index 00000000..77f110c7 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fadmin_guide%2Findex.html @@ -0,0 +1,33 @@ +

Administer and maintain a swarm of Docker Engines

+ +

When you run a swarm of Docker Engines, manager nodes are the key components for managing the swarm and storing the swarm state. It is important to understand some key features of manager nodes to properly deploy and maintain the swarm.

Refer to How nodes work for a brief overview of Docker Swarm mode and the difference between manager and worker nodes.

Operate manager nodes in a swarm

Swarm manager nodes use the Raft Consensus Algorithm to manage the swarm state. You only need to understand some general concepts of Raft in order to manage a swarm.

There is no limit on the number of manager nodes. The decision about how many manager nodes to implement is a trade-off between performance and fault-tolerance. Adding manager nodes to a swarm makes the swarm more fault-tolerant. However, additional manager nodes reduce write performance because more nodes must acknowledge proposals to update the swarm state. This means more network round-trip traffic.

Raft requires a majority of managers, also called the quorum, to agree on proposed updates to the swarm, such as node additions or removals. Membership operations are subject to the same constraints as state replication.

Maintain the quorum of managers

If the swarm loses the quorum of managers, the swarm cannot perform management tasks. If your swarm has multiple managers, always have more than two. To maintain quorum, a majority of managers must be available. An odd number of managers is recommended, because the next even number does not make the quorum easier to keep. For instance, whether you have 3 or 4 managers, you can still only lose 1 manager and maintain the quorum. If you have 5 or 6 managers, you can still only lose two.

Even if a swarm loses the quorum of managers, swarm tasks on existing worker nodes continue to run. However, swarm nodes cannot be added, updated, or removed, and new or existing tasks cannot be started, stopped, moved, or updated.

See Recovering from losing the quorum for troubleshooting steps if you do lose the quorum of managers.

Configure the manager to advertise on a static IP address

When initiating a swarm, you must specify the --advertise-addr flag to advertise your address to other manager nodes in the swarm. For more information, see Run Docker Engine in swarm mode. Because manager nodes are meant to be a stable component of the infrastructure, you should use a fixed IP address for the advertise address to prevent the swarm from becoming unstable on machine reboot.

If the whole swarm restarts and every manager node subsequently gets a new IP address, there is no way for any node to contact an existing manager. Therefore the swarm is hung while nodes try to contact one another at their old IP addresses.

Dynamic IP addresses are OK for worker nodes.

Add manager nodes for fault tolerance

You should maintain an odd number of managers in the swarm to support manager node failures. Having an odd number of managers ensures that during a network partition, there is a higher chance that the quorum remains available to process requests if the network is partitioned into two sets. Keeping the quorum is not guaranteed if you encounter more than two network partitions.

Swarm Size   Majority   Fault Tolerance
1            1          0
2            2          0
3            2          1
4            3          1
5            3          2
6            4          2
7            4          3
8            5          3
9            5          4

For example, in a swarm with 5 nodes, if you lose 3 nodes, you don’t have a quorum. Therefore you can’t add or remove nodes until you recover one of the unavailable manager nodes or recover the swarm with disaster recovery commands. See Recover from disaster.
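
The table follows directly from the (N-1)/2 tolerance rule discussed later on this page. A quick way to compute the majority and fault tolerance for any swarm size is a line of shell arithmetic (N=5 is just an example):

N=5
echo "majority: $(( N / 2 + 1 )), tolerable manager failures: $(( (N - 1) / 2 ))"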

While it is possible to scale a swarm down to a single manager node, it is impossible to demote the last manager node. This ensures you maintain access to the swarm and that the swarm can still process requests. Scaling down to a single manager is an unsafe operation and is not recommended. If the last node leaves the swarm unexpectedly during the demote operation, the swarm becomes unavailable until you reboot the node or restart with --force-new-cluster.

You manage swarm membership with the docker swarm and docker node subsystems. Refer to Add nodes to a swarm for more information on how to add worker nodes and promote a worker node to be a manager.

Distribute manager nodes

In addition to maintaining an odd number of manager nodes, pay attention to datacenter topology when placing managers. For optimal fault-tolerance, distribute manager nodes across a minimum of 3 availability-zones to support failures of an entire set of machines or common maintenance scenarios. If you suffer a failure in any of those zones, the swarm should maintain the quorum of manager nodes available to process requests and rebalance workloads.

Swarm manager nodes   Repartition (on 3 Availability zones)
3                     1-1-1
5                     2-2-1
7                     3-2-2
9                     3-3-3

Run manager-only nodes

By default, manager nodes also act as worker nodes. This means the scheduler can assign tasks to a manager node. For small and non-critical swarms, assigning tasks to managers is relatively low-risk as long as you schedule services using resource constraints for CPU and memory.

However, because manager nodes use the Raft consensus algorithm to replicate data in a consistent way, they are sensitive to resource starvation. You should isolate managers in your swarm from processes that might block swarm operations like swarm heartbeat or leader elections.

To avoid interference with manager node operation, you can drain manager nodes to make them unavailable as worker nodes:

$ docker node update --availability drain <NODE>
+

When you drain a node, the scheduler reassigns any tasks running on the node to other available worker nodes in the swarm. It also prevents the scheduler from assigning tasks to the node.
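
When the maintenance or isolation is no longer needed, you can return the node to normal scheduling with the same command (the <NODE> placeholder is the node name or ID, as above):

$ docker node update --availability active <NODE>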

Add worker nodes for load balancing

Add nodes to the swarm to balance your swarm’s load. Replicated service tasks are distributed across the swarm as evenly as possible over time, as long as the worker nodes are matched to the requirements of the services. When limiting a service to run on only specific types of nodes, such as nodes with a specific number of CPUs or amount of memory, remember that worker nodes that do not meet these requirements cannot run these tasks.

Monitor swarm health

You can monitor the health of manager nodes by querying the docker nodes API in JSON format through the /nodes HTTP endpoint. Refer to the nodes API documentation for more information.

From the command line, run docker node inspect <id-node> to query the nodes. For instance, to query the reachability of the node as a manager:

$ docker node inspect manager1 --format "{{ .ManagerStatus.Reachability }}"
+reachable
+

To query the status of the node as a worker that accepts tasks:

$ docker node inspect manager1 --format "{{ .Status.State }}"
+ready
+

From those commands, we can see that manager1 is both reachable as a manager and ready as a worker.

An unreachable health status means that this particular manager node is unreachable from other manager nodes. In this case, you need to take action to restore the unreachable manager.

Alternatively you can also get an overview of the swarm health from a manager node with docker node ls:

$ docker node ls
+ID                           HOSTNAME  MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS
+1mhtdwhvsgr3c26xxbnzdc3yp    node05    Accepted    Ready   Active
+516pacagkqp2xc3fk9t1dhjor    node02    Accepted    Ready   Active        Reachable
+9ifojw8of78kkusuc4a6c23fx *  node01    Accepted    Ready   Active        Leader
+ax11wdpwrrb6db3mfjydscgk7    node04    Accepted    Ready   Active
+bb1nrq2cswhtbg4mrsqnlx1ck    node03    Accepted    Ready   Active        Reachable
+di9wxgz8dtuh9d2hn089ecqkf    node06    Accepted    Ready   Active
+

Troubleshoot a manager node

You should never restart a manager node by copying the raft directory from another node. The data directory is unique to a node ID. A node can only use a node ID once to join the swarm. The node ID space should be globally unique.

To cleanly re-join a manager node to a cluster:

  1. To demote the node to a worker, run docker node demote <NODE>.
  2. To remove the node from the swarm, run docker node rm <NODE>.
  3. Re-join the node to the swarm with a fresh state using docker swarm join.

For more information on joining a manager node to a swarm, refer to Join nodes to a swarm.
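
A minimal sketch of that sequence, assuming the affected manager is named node3 and that you have access to another healthy manager:

# from a healthy manager
$ docker node demote node3
$ docker node rm node3

# on node3 itself: discard its old swarm state, then re-join as a fresh node
$ docker swarm leave --force
$ docker swarm join --token <manager-join-token> <manager-ip>:2377

# the manager join token can be printed on any existing manager with:
$ docker swarm join-token manager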

Forcibly remove a node

In most cases, you should shut down a node before removing it from a swarm with the docker node rm command. If a node becomes unreachable, unresponsive, or compromised you can forcefully remove the node without shutting it down by passing the --force flag. For instance, if node9 becomes compromised:

$ docker node rm node9
+
+Error response from daemon: rpc error: code = 9 desc = node node9 is not down and can't be removed
+
+$ docker node rm --force node9
+
+Node node9 removed from swarm
+

Before you forcefully remove a manager node, you must first demote it to the worker role. Make sure that you always have an odd number of manager nodes if you demote or remove a manager.

Back up the swarm

Docker manager nodes store the swarm state and manager logs in the /var/lib/docker/swarm/ directory. This data includes the keys used to encrypt the Raft logs. Without these keys, you cannot restore the swarm.

You can back up the swarm using any manager. Use the following procedure.

  1. If the swarm has auto-lock enabled, you need the unlock key to restore the swarm from backup. Retrieve the unlock key if necessary and store it in a safe location. If you are unsure, read Lock your swarm to protect its encryption key.

  2. Stop Docker on the manager before backing up the data, so that no data is being changed during the backup. It is possible to take a backup while the manager is running (a “hot” backup), but this is not recommended and your results are less predictable when restoring. While the manager is down, other nodes continue generating swarm data that is not part of this backup.

    Note

    Be sure to maintain the quorum of swarm managers. During the time that a manager is shut down, your swarm is more vulnerable to losing the quorum if further nodes are lost. The number of managers you run is a trade-off. If you regularly take down managers to do backups, consider running a five manager swarm, so that you can lose an additional manager while the backup is running, without disrupting your services.

  3. Back up the entire /var/lib/docker/swarm directory.

  4. Restart the manager.

To restore, see Restore from a backup.
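
A minimal sketch of that procedure on a systemd-based host, assuming you want a compressed archive in the current directory (the archive name is arbitrary):

$ sudo systemctl stop docker
$ sudo tar -czf swarm-backup.tar.gz -C /var/lib/docker swarm
$ sudo systemctl start docker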

Recover from disaster

Restore from a backup

After backing up the swarm as described in Back up the swarm, use the following procedure to restore the data to a new swarm.

  1. Shut down Docker on the target host machine for the restored swarm.

  2. Remove the contents of the /var/lib/docker/swarm directory on the new swarm.

  3. Restore the /var/lib/docker/swarm directory with the contents of the backup.

    Note

    The new node uses the same encryption key for on-disk storage as the old one. It is not possible to change the on-disk storage encryption keys at this time.

    In the case of a swarm with auto-lock enabled, the unlock key is also the same as on the old swarm, and the unlock key is needed to restore the swarm.

  4. Start Docker on the new node. Unlock the swarm if necessary. Re-initialize the swarm using the following command, so that this node does not attempt to connect to nodes that were part of the old swarm, and presumably no longer exist.

    $ docker swarm init --force-new-cluster
    +
  5. Verify that the state of the swarm is as expected. This may include application-specific tests or simply checking the output of docker service ls to be sure that all expected services are present.

  6. If you use auto-lock, rotate the unlock key.

  7. Add manager and worker nodes to bring your new swarm up to operating capacity.

  8. Reinstate your previous backup regimen on the new swarm.
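
A minimal sketch of the restore procedure above, assuming the backup archive created in the previous section and a systemd-based host:

$ sudo systemctl stop docker
$ sudo rm -rf /var/lib/docker/swarm
$ sudo tar -xzf swarm-backup.tar.gz -C /var/lib/docker
$ sudo systemctl start docker
$ docker swarm unlock        # only if auto-lock is enabled
$ docker swarm init --force-new-cluster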

Recover from losing the quorum

Swarm is resilient to failures and the swarm can recover from any number of temporary node failures (machine reboots or crash with restart) or other transient errors. However, a swarm cannot automatically recover if it loses a quorum. Tasks on existing worker nodes continue to run, but administrative tasks are not possible, including scaling or updating services and joining or removing nodes from the swarm. The best way to recover is to bring the missing manager nodes back online. If that is not possible, continue reading for some options for recovering your swarm.

In a swarm of N managers, a quorum (a majority) of manager nodes must always be available. For example, in a swarm with five managers, a minimum of three must be operational and in communication with each other. In other words, the swarm can tolerate up to (N-1)/2 permanent failures beyond which requests involving swarm management cannot be processed. These types of failures include data corruption or hardware failures.

If you lose the quorum of managers, you cannot administer the swarm. If you have lost the quorum and you attempt to perform any management operation on the swarm, an error occurs:

Error response from daemon: rpc error: code = 4 desc = context deadline exceeded
+

The best way to recover from losing the quorum is to bring the failed nodes back online. If you can’t do that, the only way to recover from this state is to use the --force-new-cluster action from a manager node. This removes all managers except the manager the command was run from. The quorum is achieved because there is now only one manager. Promote nodes to be managers until you have the desired number of managers.

From the node to recover, run:

$ docker swarm init --force-new-cluster --advertise-addr node01:2377
+

When you run the docker swarm init command with the --force-new-cluster flag, the Docker Engine where you run the command becomes the manager node of a single-node swarm which is capable of managing and running services. The manager has all the previous information about services and tasks, worker nodes are still part of the swarm, and services are still running. You need to add or re-add manager nodes to achieve your previous task distribution and ensure that you have enough managers to maintain high availability and prevent losing the quorum.

Force the swarm to rebalance

Generally, you do not need to force the swarm to rebalance its tasks. When you add a new node to a swarm, or a node reconnects to the swarm after a period of unavailability, the swarm does not automatically give a workload to the idle node. This is a design decision. If the swarm periodically shifted tasks to different nodes for the sake of balance, the clients using those tasks would be disrupted. The goal is to avoid disrupting running services for the sake of balance across the swarm. When new tasks start, or when a node with running tasks becomes unavailable, those tasks are given to less busy nodes. The goal is eventual balance, with minimal disruption to the end user.

You can use the --force or -f flag with the docker service update command to force the service to redistribute its tasks across the available worker nodes. This causes the service tasks to restart. Client applications may be disrupted. If you have configured it, your service uses a rolling update.

If you use an earlier version of Docker and you want to achieve an even balance of load across workers, and don't mind disrupting running tasks, you can force your swarm to re-balance by temporarily scaling the service upward. Use docker service inspect --pretty <servicename> to see the configured scale of a service. When you use docker service scale, the nodes with the lowest number of tasks are targeted to receive the new workloads. There may be multiple under-loaded nodes in your swarm. You may need to scale the service up by modest increments a few times to achieve the balance you want across all the nodes.

When the load is balanced to your satisfaction, you can scale the service back down to the original scale. You can use docker service ps to assess the current balance of your service across nodes.
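
For example, with a replicated service (the <SERVICE> placeholder is your service name, and the replica counts are illustrative):

$ docker service update --force <SERVICE>     # current engines: redistribute tasks directly

$ docker service inspect --pretty <SERVICE>   # older engines: note the configured replica count,
$ docker service scale <SERVICE>=10           # scale up temporarily so idle nodes pick up tasks,
$ docker service scale <SERVICE>=5            # then scale back to the original count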

See also docker service scale and docker service ps.

+

docker, container, swarm, manager, raft

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/admin_guide/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fconfigs%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fconfigs%2Findex.html new file mode 100644 index 00000000..913b4ce8 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fconfigs%2Findex.html @@ -0,0 +1,236 @@ +

Store configuration data using Docker Configs

+ +

About configs

Docker swarm service configs allow you to store non-sensitive information, such as configuration files, outside a service’s image or running containers. This allows you to keep your images as generic as possible, without the need to bind-mount configuration files into the containers or use environment variables.

Configs operate in a similar way to secrets, except that they are not encrypted at rest and are mounted directly into the container’s filesystem without the use of RAM disks. Configs can be added or removed from a service at any time, and services can share a config. You can even use configs in conjunction with environment variables or labels, for maximum flexibility. Config values can be generic strings or binary content (up to 500 kb in size).

Note: Docker configs are only available to swarm services, not to standalone containers. To use this feature, consider adapting your container to run as a service with a scale of 1.

Configs are supported on both Linux and Windows services.

Windows support

Docker includes support for configs on Windows containers, but there are differences in the implementations, which are called out in the examples below.

How Docker manages configs

When you add a config to the swarm, Docker sends the config to the swarm manager over a mutual TLS connection. The config is stored in the Raft log, which is encrypted. The entire Raft log is replicated across the other managers, ensuring the same high availability guarantees for configs as for the rest of the swarm management data.

When you grant a newly-created or running service access to a config, the config is mounted as a file in the container. The location of the mount point within the container defaults to /<config-name> in Linux containers. In Windows containers, configs are all mounted into C:\ProgramData\Docker\configs and symbolic links are created to the desired location, which defaults to C:\<config-name>.

You can set the ownership (uid and gid) for the config, using either the numerical ID or the name of the user or group. You can also specify the file permissions (mode). These settings are ignored for Windows containers.
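
For example, the long form of the --config flag accepts these options when you create a service; the service and image names below are placeholders:

$ docker service create \
     --name my-app \
     --config source=my-config,target=/etc/my-app/app.conf,uid=1000,gid=1000,mode=0440 \
     my-app-image:latest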

You can update a service to grant it access to additional configs or revoke its access to a given config at any time.

A node only has access to configs if the node is a swarm manager or if it is running service tasks which have been granted access to the config. When a container task stops running, the configs shared to it are unmounted from the in-memory filesystem for that container and flushed from the node’s memory.

If a node loses connectivity to the swarm while it is running a task container with access to a config, the task container still has access to its configs, but cannot receive updates until the node reconnects to the swarm.

You can add or inspect an individual config at any time, or list all configs. You cannot remove a config that a running service is using. See Rotate a config for a way to remove a config without disrupting running services.

To update or roll back configs more easily, consider adding a version number or date to the config name. This is made easier by the ability to control the mount point of the config within a given container.

To update a stack, make changes to your Compose file, then re-run docker stack deploy -c <new-compose-file> <stack-name>. If you use a new config in that file, your services start using it. Keep in mind that configurations are immutable, so you can't change the file for an existing service. Instead, you create a new config to use a different file.

You can run docker stack rm to stop the app and take down the stack. This removes any config that was created by docker stack deploy with the same stack name. This removes all configs, including those not referenced by services and those remaining after a docker service update --config-rm.

Read more about docker config commands

Use these links to read about specific commands, or continue to the example about using configs with a service.

Examples

This section includes graduated examples which illustrate how to use Docker configs.

Note: These examples use a single-Engine swarm and unscaled services for simplicity. The examples use Linux containers, but Windows containers also support configs.

Defining and using configs in compose files

The docker stack command supports defining configs in a Compose file. However, the configs key is not supported for docker compose. See the Compose file reference for details.
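
As a sketch of what that looks like, the following minimal Compose file (format 3.3 or later) defines a config from a local file and attaches it to a service; the file name my-config.txt is an assumption. You would deploy it with docker stack deploy -c docker-compose.yml <stack-name>:

version: "3.3"
services:
  redis:
    image: redis:alpine
    configs:
      - my-config
configs:
  my-config:
    file: ./my-config.txt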

Simple example: Get started with configs

This simple example shows how configs work in just a few commands. For a real-world example, continue to Advanced example: Use configs with a Nginx service.

  1. Add a config to Docker. The docker config create command reads standard input because the last argument, which represents the file to read the config from, is set to -.

    $ echo "This is a config" | docker config create my-config -
    +
  2. Create a redis service and grant it access to the config. By default, the container can access the config at /my-config, but you can customize the file name on the container using the target option.

    $ docker service create --name redis --config my-config redis:alpine
    +
  3. Verify that the task is running without issues using docker service ps. If everything is working, the output looks similar to this:

    $ docker service ps redis
    +
    +ID            NAME     IMAGE         NODE              DESIRED STATE  CURRENT STATE          ERROR  PORTS
    +bkna6bpn8r1a  redis.1  redis:alpine  ip-172-31-46-109  Running        Running 8 seconds ago
    +
  4. Get the ID of the redis service task container using docker ps, so that you can use docker container exec to connect to the container and read the contents of the config data file, which defaults to being readable by all and has the same name as the name of the config. The first command below illustrates how to find the container ID, and the second and third commands use command substitution to do this automatically.

    $ docker ps --filter name=redis -q
    +
    +5cb1c2348a59
    +
    +$ docker container exec $(docker ps --filter name=redis -q) ls -l /my-config
    +
    +-r--r--r--    1 root     root            12 Jun  5 20:49 my-config
    +
    +$ docker container exec $(docker ps --filter name=redis -q) cat /my-config
    +
    +This is a config
    +
  5. Try removing the config. The removal fails because the redis service is running and has access to the config.

    +$ docker config ls
    +
    +ID                          NAME                CREATED             UPDATED
    +fzwcfuqjkvo5foqu7ts7ls578   hello               31 minutes ago      31 minutes ago
    +
    +
    +$ docker config rm my-config
    +
    +Error response from daemon: rpc error: code = 3 desc = config 'my-config' is
    +in use by the following service: redis
    +
  6. Remove access to the config from the running redis service by updating the service.

    $ docker service update --config-rm my-config redis
    +
  7. Repeat steps 3 and 4 again, verifying that the service no longer has access to the config. The container ID is different, because the service update command redeploys the service.

    $ docker container exec -it $(docker ps --filter name=redis -q) cat /my-config
    +
    +cat: can't open '/my-config': No such file or directory
    +
  8. Stop and remove the service, and remove the config from Docker.

    $ docker service rm redis
    +
    +$ docker config rm my-config
    +

Simple example: Use configs in a Windows service

This is a very simple example which shows how to use configs with a Microsoft IIS service running on Docker for Windows running Windows containers on Microsoft Windows 10. It is a naive example that stores the webpage in a config.

This example assumes that you have PowerShell installed.

  1. Save the following into a new file index.html.

    <html lang="en">
    +  <head><title>Hello Docker</title></head>
    +  <body>
    +    <p>Hello Docker! You have deployed a HTML page.</p>
    +  </body>
    +</html>
    +
  2. If you have not already done so, initialize or join the swarm.

    docker swarm init
    +
  3. Save the index.html file as a swarm config named homepage.

    docker config create homepage index.html
    +
  4. Create an IIS service and grant it access to the homepage config.

    docker service create
    +    --name my-iis
    +    --publish published=8000,target=8000
    +    --config src=homepage,target="\inetpub\wwwroot\index.html"
    +    microsoft/iis:nanoserver
    +
  5. Access the IIS service at http://localhost:8000/. It should serve the HTML content from the first step.

  6. Remove the service and the config.

    docker service rm my-iis
    +
    +docker config rm homepage
    +

Example: Use a templated config

To create a configuration in which the content is generated using a template engine, use the --template-driver parameter and specify the engine name as its argument. The template is rendered when the container is created.

  1. Save the following into a new file index.html.tmpl.

    <html lang="en">
    +  <head><title>Hello Docker</title></head>
    +  <body>
    +    <p>Hello {{ env "HELLO" }}! I'm service {{ .Service.Name }}.</p>
    +  </body>
    +</html>
    +
  2. Save the index.html.tmpl file as a swarm config named homepage. Provide the --template-driver parameter and specify golang as the template engine.

    $ docker config create --template-driver golang homepage index.html.tmpl
    +
  3. Create a service that runs Nginx and has access to the environment variable HELLO and to the config.

    $ docker service create \
    +     --name hello-template \
    +     --env HELLO="Docker" \
    +     --config source=homepage,target=/usr/share/nginx/html/index.html \
    +     --publish published=3000,target=80 \
    +     nginx:alpine
    +
  4. Verify that the service is operational: you can reach the Nginx server and the correct output is being served.

    $ curl http://0.0.0.0:3000
    +
    +<html lang="en">
    +  <head><title>Hello Docker</title></head>
    +  <body>
    +    <p>Hello Docker! I'm service hello-template.</p>
    +  </body>
    +</html>
    +

Advanced example: Use configs with a Nginx service

This example is divided into two parts. The first part is all about generating the site certificate and does not directly involve Docker configs at all, but it sets up the second part, where you store and use the site certificate as a series of secrets and the Nginx configuration as a config. The example shows how to set options on the config, such as the target location within the container and the file permissions (mode).

Generate the site certificate

Generate a root CA and TLS certificate and key for your site. For production sites, you may want to use a service such as Let’s Encrypt to generate the TLS certificate and key, but this example uses command-line tools. This step is a little complicated, but is only a set-up step so that you have something to store as a Docker secret. If you want to skip these sub-steps, you can use Let’s Encrypt to generate the site key and certificate, name the files site.key and site.crt, and skip to Configure the Nginx container.

  1. Generate a root key.

    $ openssl genrsa -out "root-ca.key" 4096
    +
  2. Generate a CSR using the root key.

    $ openssl req \
    +          -new -key "root-ca.key" \
    +          -out "root-ca.csr" -sha256 \
    +          -subj '/C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA'
    +
  3. Configure the root CA. Edit a new file called root-ca.cnf and paste the following contents into it. This constrains the root CA to only sign leaf certificates and not intermediate CAs.

    [root_ca]
    +basicConstraints = critical,CA:TRUE,pathlen:1
    +keyUsage = critical, nonRepudiation, cRLSign, keyCertSign
    +subjectKeyIdentifier=hash
    +
  4. Sign the certificate.

    $ openssl x509 -req -days 3650 -in "root-ca.csr" \
    +               -signkey "root-ca.key" -sha256 -out "root-ca.crt" \
    +               -extfile "root-ca.cnf" -extensions \
    +               root_ca
    +
  5. Generate the site key.

    $ openssl genrsa -out "site.key" 4096
    +
  6. Generate the site certificate and sign it with the site key.

    $ openssl req -new -key "site.key" -out "site.csr" -sha256 \
    +          -subj '/C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost'
    +
  7. Configure the site certificate. Edit a new file called site.cnf and paste the following contents into it. This constrains the site certificate so that it can only be used to authenticate a server and can’t be used to sign certificates.

    [server]
    +authorityKeyIdentifier=keyid,issuer
    +basicConstraints = critical,CA:FALSE
    +extendedKeyUsage=serverAuth
    +keyUsage = critical, digitalSignature, keyEncipherment
    +subjectAltName = DNS:localhost, IP:127.0.0.1
    +subjectKeyIdentifier=hash
    +
  8. Sign the site certificate.

    $ openssl x509 -req -days 750 -in "site.csr" -sha256 \
    +    -CA "root-ca.crt" -CAkey "root-ca.key" -CAcreateserial \
    +    -out "site.crt" -extfile "site.cnf" -extensions server
    +
  9. The site.csr and site.cnf files are not needed by the Nginx service, but you need them if you want to generate a new site certificate. Protect the root-ca.key file.

Configure the Nginx container

  1. Produce a very basic Nginx configuration that serves static files over HTTPS. The TLS certificate and key are stored as Docker secrets so that they can be rotated easily.

    In the current directory, create a new file called site.conf with the following contents:

    server {
    +    listen                443 ssl;
    +    server_name           localhost;
    +    ssl_certificate       /run/secrets/site.crt;
    +    ssl_certificate_key   /run/secrets/site.key;
    +
    +    location / {
    +        root   /usr/share/nginx/html;
    +        index  index.html index.htm;
    +    }
    +}
    +
  2. Create two secrets, representing the key and the certificate. You can store any file as a secret as long as it is smaller than 500 KB. This allows you to decouple the key and certificate from the services that use them. In these examples, the secret name and the file name are the same.

    $ docker secret create site.key site.key
    +
    +$ docker secret create site.crt site.crt
    +
  3. Save the site.conf file in a Docker config. The first parameter is the name of the config, and the second parameter is the file to read it from.

    $ docker config create site.conf site.conf
    +

    List the configs:

    $ docker config ls
    +
    +ID                          NAME                CREATED             UPDATED
    +4ory233120ccg7biwvy11gl5z   site.conf           4 seconds ago       4 seconds ago
    +
  4. Create a service that runs Nginx and has access to the two secrets and the config. Set the mode to 0440 so that the file is only readable by its owner and that owner’s group, not the world.

    $ docker service create \
    +     --name nginx \
    +     --secret site.key \
    +     --secret site.crt \
    +     --config source=site.conf,target=/etc/nginx/conf.d/site.conf,mode=0440 \
    +     --publish published=3000,target=443 \
    +     nginx:latest \
    +     sh -c "exec nginx -g 'daemon off;'"
    +

    Within the running containers, the following three files now exist:

    • /run/secrets/site.key
    • /run/secrets/site.crt
    • /etc/nginx/conf.d/site.conf
  5. Verify that the Nginx service is running.

    $ docker service ls
    +
    +ID            NAME   MODE        REPLICAS  IMAGE
    +zeskcec62q24  nginx  replicated  1/1       nginx:latest
    +
    +$ docker service ps nginx
    +
    +NAME                  IMAGE         NODE  DESIRED STATE  CURRENT STATE          ERROR  PORTS
    +nginx.1.9ls3yo9ugcls  nginx:latest  moby  Running        Running 3 minutes ago
    +
  6. Verify that the service is operational: you can reach the Nginx server and the correct TLS certificate is being used.

    $ curl --cacert root-ca.crt https://0.0.0.0:3000
    +
    +<!DOCTYPE html>
    +<html>
    +<head>
    +<title>Welcome to nginx!</title>
    +<style>
    +    body {
    +        width: 35em;
    +        margin: 0 auto;
    +        font-family: Tahoma, Verdana, Arial, sans-serif;
    +    }
    +</style>
    +</head>
    +<body>
    +<h1>Welcome to nginx!</h1>
    +<p>If you see this page, the nginx web server is successfully installed and
    +working. Further configuration is required.</p>
    +
    +<p>For online documentation and support, refer to
    +<a href="https://nginx.org">nginx.org</a>.<br/>
    +Commercial support is available at
    +<a href="https://www.nginx.com">www.nginx.com</a>.</p>
    +
    +<p><em>Thank you for using nginx.</em></p>
    +</body>
    +</html>
    +
    $ openssl s_client -connect 0.0.0.0:3000 -CAfile root-ca.crt
    +
    +CONNECTED(00000003)
    +depth=1 /C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA
    +verify return:1
    +depth=0 /C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost
    +verify return:1
    +---
    +Certificate chain
    + 0 s:/C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost
    +   i:/C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA
    +---
    +Server certificate
    +-----BEGIN CERTIFICATE-----
    +…
    +-----END CERTIFICATE-----
    +subject=/C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost
    +issuer=/C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA
    +---
    +No client certificate CA names sent
    +---
    +SSL handshake has read 1663 bytes and written 712 bytes
    +---
    +New, TLSv1/SSLv3, Cipher is AES256-SHA
    +Server public key is 4096 bit
    +Secure Renegotiation IS supported
    +Compression: NONE
    +Expansion: NONE
    +SSL-Session:
    +    Protocol  : TLSv1
    +    Cipher    : AES256-SHA
    +    Session-ID: A1A8BF35549C5715648A12FD7B7E3D861539316B03440187D9DA6C2E48822853
    +    Session-ID-ctx:
    +    Master-Key: F39D1B12274BA16D3A906F390A61438221E381952E9E1E05D3DD784F0135FB81353DA38C6D5C021CB926E844DFC49FC4
    +    Key-Arg   : None
    +    Start Time: 1481685096
    +    Timeout   : 300 (sec)
    +    Verify return code: 0 (ok)
    +
  7. Unless you are going to continue to the next example, clean up after running this example by removing the nginx service and the stored secrets and config.

    $ docker service rm nginx
    +
    +$ docker secret rm site.crt site.key
    +
    +$ docker config rm site.conf
    +

You have now configured a Nginx service with its configuration decoupled from its image. You could run multiple sites with exactly the same image but separate configurations, without the need to build a custom image at all.

Example: Rotate a config

To rotate a config, you first save a new config with a different name than the one that is currently in use. You then redeploy the service, removing the old config and adding the new config at the same mount point within the container. This example builds upon the previous one by rotating the site.conf configuration file.

  1. Edit the site.conf file locally. Add index.php to the index line, and save the file.

    server {
    +    listen                443 ssl;
    +    server_name           localhost;
    +    ssl_certificate       /run/secrets/site.crt;
    +    ssl_certificate_key   /run/secrets/site.key;
    +
    +    location / {
    +        root   /usr/share/nginx/html;
    +        index  index.html index.htm index.php;
    +    }
    +}
    +
  2. Create a new Docker config using the new site.conf, called site-v2.conf.

    $ docker config create site-v2.conf site.conf
    +
  3. Update the nginx service to use the new config instead of the old one.

    $ docker service update \
    +  --config-rm site.conf \
    +  --config-add source=site-v2.conf,target=/etc/nginx/conf.d/site.conf,mode=0440 \
    +  nginx
    +
  4. Verify that the nginx service is fully re-deployed, using docker service ps nginx. When it is, you can remove the old site.conf config.

    $ docker config rm site.conf
    +
  5. To clean up, you can remove the nginx service, as well as the secrets and configs.

    $ docker service rm nginx
    +
    +$ docker secret rm site.crt site.key
    +
    +$ docker config rm site-v2.conf
    +

You have now updated your nginx service’s configuration without the need to rebuild its image.
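
If you want to double-check which config a service currently mounts before removing the old one, you can list the configs and inspect the service. This is a minimal sketch; the Go-template path is an assumption about the standard docker service inspect JSON layout, not taken from this page:

# List the configs known to the swarm
$ docker config ls

# Show the configs attached to the service (template path assumed)
$ docker service inspect \
    --format '{{json .Spec.TaskTemplate.ContainerSpec.Configs}}' \
    nginx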

diff --git a/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fnodes%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fnodes%2Findex.html new file mode 100644 index 00000000..adccc947 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fnodes%2Findex.html @@ -0,0 +1,11 @@ +

How nodes work


Docker Engine 1.12 introduces swarm mode that enables you to create a cluster of one or more Docker Engines called a swarm. A swarm consists of one or more nodes: physical or virtual machines running Docker Engine 1.12 or later in swarm mode.

There are two types of nodes: managers and workers.

[Image: Swarm mode cluster]

If you haven’t already, read through the swarm mode overview and key concepts.

Manager nodes

Manager nodes handle cluster management tasks:

Using a Raft implementation, the managers maintain a consistent internal state of the entire swarm and all the services running on it. For testing purposes it is OK to run a swarm with a single manager. If the manager in a single-manager swarm fails, your services continue to run, but you need to create a new cluster to recover.

To take advantage of swarm mode’s fault-tolerance features, Docker recommends you implement an odd number of nodes according to your organization’s high-availability requirements. When you have multiple managers you can recover from the failure of a manager node without downtime.

Worker nodes

Worker nodes are also instances of Docker Engine whose sole purpose is to execute containers. Worker nodes don’t participate in the Raft distributed state, make scheduling decisions, or serve the swarm mode HTTP API.

You can create a swarm of one manager node, but you cannot have a worker node without at least one manager node. By default, all managers are also workers. In a single manager node cluster, you can run commands like docker service create and the scheduler places all tasks on the local Engine.

To prevent the scheduler from placing tasks on a manager node in a multi-node swarm, set the availability for the manager node to Drain. The scheduler gracefully stops tasks on nodes in Drain mode and schedules the tasks on an Active node. The scheduler does not assign new tasks to nodes with Drain availability.

Refer to the docker node update command line reference to see how to change node availability.
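
For instance, a minimal sketch of draining a node before maintenance and returning it to service afterwards; node-1 is a placeholder name:

# Stop scheduling new tasks on the node and move its running tasks elsewhere
$ docker node update --availability drain node-1

# Make the node schedulable again once maintenance is done
$ docker node update --availability active node-1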

Change roles

You can promote a worker node to be a manager by running docker node promote. For example, you may want to promote a worker node when you take a manager node offline for maintenance. See node promote.

You can also demote a manager node to a worker node. See node demote.

Learn more

diff --git a/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fpki%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fpki%2Findex.html new file mode 100644 index 00000000..fc38937a --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fpki%2Findex.html @@ -0,0 +1,22 @@ +

Manage swarm security with public key infrastructure (PKI)


The swarm mode public key infrastructure (PKI) system built into Docker makes it simple to securely deploy a container orchestration system. The nodes in a swarm use mutual Transport Layer Security (TLS) to authenticate, authorize, and encrypt the communications with other nodes in the swarm.

When you create a swarm by running docker swarm init, Docker designates itself as a manager node. By default, the manager node generates a new root Certificate Authority (CA) along with a key pair, which are used to secure communications with other nodes that join the swarm. If you prefer, you can specify your own externally-generated root CA, using the --external-ca flag of the docker swarm init command.

The manager node also generates two tokens to use when you join additional nodes to the swarm: one worker token and one manager token. Each token includes the digest of the root CA’s certificate and a randomly generated secret. When a node joins the swarm, the joining node uses the digest to validate the root CA certificate from the remote manager. The remote manager uses the secret to ensure the joining node is an approved node.

Each time a new node joins the swarm, the manager issues a certificate to the node. The certificate contains a randomly generated node ID to identify the node under the certificate common name (CN) and the role under the organizational unit (OU). The node ID serves as the cryptographically secure node identity for the lifetime of the node in the current swarm.

The diagram below illustrates how manager nodes and worker nodes encrypt communications using a minimum of TLS 1.2.

[Image: tls diagram]

The example below shows the information from a certificate from a worker node:

Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            3b:1c:06:91:73:fb:16:ff:69:c3:f7:a2:fe:96:c1:73:e2:80:97:3b
+        Signature Algorithm: ecdsa-with-SHA256
+        Issuer: CN=swarm-ca
+        Validity
+            Not Before: Aug 30 02:39:00 2016 GMT
+            Not After : Nov 28 03:39:00 2016 GMT
+        Subject: O=ec2adilxf4ngv7ev8fwsi61i7, OU=swarm-worker, CN=dw02poa4vqvzxi5c10gm4pq2g
+...snip...
+

By default, each node in the swarm renews its certificate every three months. You can configure this interval by running the docker swarm update --cert-expiry <TIME PERIOD> command. The minimum rotation value is 1 hour. Refer to the docker swarm update CLI reference for details.
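
As a hedged sketch, the following shortens the rotation interval to 30 days and then checks the value reported on a manager node; 720h is only an example value:

# Renew node certificates every 30 days instead of the default 90
$ docker swarm update --cert-expiry 720h

# The swarm section of docker info reports the configured expiry
$ docker info | grep -i expiry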

Rotating the CA certificate

In the event that a cluster CA key or a manager node is compromised, you can rotate the swarm root CA so that none of the nodes trust certificates signed by the old root CA anymore.

Run docker swarm ca --rotate to generate a new CA certificate and key. If you prefer, you can pass the --ca-cert and --external-ca flags to specify the root certificate and to use a root CA external to the swarm. Alternately, you can pass the --ca-cert and --ca-key flags to specify the exact certificate and key you would like the swarm to use.

When you issue the docker swarm ca --rotate command, the following things happen in sequence:

  1. Docker generates a cross-signed certificate. This means that a version of the new root CA certificate is signed with the old root CA certificate. This cross-signed certificate is used as an intermediate certificate for all new node certificates. This ensures that nodes that still trust the old root CA can still validate a certificate signed by the new CA.

  2. Docker also tells all nodes to immediately renew their TLS certificates. This process may take several minutes, depending on the number of nodes in the swarm.

  3. After every node in the swarm has a new TLS certificate signed by the new CA, Docker forgets about the old CA certificate and key material, and tells all the nodes to trust the new CA certificate only.

    This also causes a change in the swarm’s join tokens. The previous join tokens are no longer valid.

From this point on, all new node certificates issued are signed with the new root CA, and do not contain any intermediates.
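
A minimal sketch of a rotation, run from a manager node; after it completes, distribute the new join tokens to whoever provisions nodes:

# Generate a new root CA certificate and key and roll them out to all nodes
$ docker swarm ca --rotate

# The join tokens change as part of the rotation; print the new ones
$ docker swarm join-token worker
$ docker swarm join-token manager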

Learn More

diff --git a/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fservices%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fservices%2Findex.html new file mode 100644 index 00000000..dcbf3a15 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fhow-swarm-mode-works%2Fservices%2Findex.html @@ -0,0 +1,10 @@ +

How services work


To deploy an application image when Docker Engine is in swarm mode, you create a service. Frequently a service is the image for a microservice within the context of some larger application. Examples of services might include an HTTP server, a database, or any other type of executable program that you wish to run in a distributed environment.

When you create a service, you specify which container image to use and which commands to execute inside running containers. You also define options for the service including:

Services, tasks, and containers

When you deploy the service to the swarm, the swarm manager accepts your service definition as the desired state for the service. Then it schedules the service on nodes in the swarm as one or more replica tasks. The tasks run independently of each other on nodes in the swarm.

For example, imagine you want to load balance between three instances of an HTTP listener. The diagram below shows an HTTP listener service with three replicas. Each of the three instances of the listener is a task in the swarm.

[Image: services diagram]

A container is an isolated process. In the swarm mode model, each task invokes exactly one container. A task is analogous to a “slot” where the scheduler places a container. Once the container is live, the scheduler recognizes that the task is in a running state. If the container fails health checks or terminates, the task terminates.

Tasks and scheduling

A task is the atomic unit of scheduling within a swarm. When you declare a desired service state by creating or updating a service, the orchestrator realizes the desired state by scheduling tasks. For instance, you define a service that instructs the orchestrator to keep three instances of an HTTP listener running at all times. The orchestrator responds by creating three tasks. Each task is a slot that the scheduler fills by spawning a container. The container is the instantiation of the task. If an HTTP listener task subsequently fails its health check or crashes, the orchestrator creates a new replica task that spawns a new container.
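
As an illustration, a hedged sketch of the three-replica HTTP listener described above; the service name and the nginx image are placeholder choices:

# Declare the desired state: three replicas of an HTTP listener
$ docker service create --name listener --replicas 3 --publish published=8080,target=80 nginx

# Each replica is a task, and each task runs exactly one container
$ docker service ps listener

# Changing the desired state makes the orchestrator add or remove tasks
$ docker service scale listener=5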

A task is a one-directional mechanism. It progresses monotonically through a series of states: assigned, prepared, running, etc. If the task fails the orchestrator removes the task and its container and then creates a new task to replace it according to the desired state specified by the service.

The underlying logic of Docker swarm mode is a general purpose scheduler and orchestrator. The service and task abstractions themselves are unaware of the containers they implement. Hypothetically, you could implement other types of tasks such as virtual machine tasks or non-containerized process tasks. The scheduler and orchestrator are agnostic about the type of task. However, the current version of Docker only supports container tasks.

The diagram below shows how swarm mode accepts service create requests and schedules tasks to worker nodes.

[Image: services flow]

Pending services

A service may be configured in such a way that no node currently in the swarm can run its tasks. In this case, the service remains in state pending. Here are a few examples of when a service might remain in state pending.

Note: If your only intention is to prevent a service from being deployed, scale the service to 0 instead of trying to configure it in such a way that it remains in pending.

This behavior illustrates that the requirements and configuration of your tasks are not tightly tied to the current state of the swarm. As the administrator of a swarm, you declare the desired state of your swarm, and the manager works with the nodes in the swarm to create that state. You do not need to micro-manage the tasks on the swarm.
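
One way to observe the pending state is to create a service with a placement constraint that no current node satisfies; the label in this sketch is hypothetical:

# No node carries the label, so the task cannot be scheduled and the service stays pending
$ docker service create --name pending-demo \
  --constraint 'node.labels.special==true' \
  nginx

# REPLICAS shows 0/1 while the service is pending
$ docker service ls

# As noted above, scale to 0 if you simply want to stop deploying it
$ docker service scale pending-demo=0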

Replicated and global services

There are two types of service deployments, replicated and global.

For a replicated service, you specify the number of identical tasks you want to run. For example, you decide to deploy an HTTP service with three replicas, each serving the same content.

A global service is a service that runs one task on every node. There is no pre-specified number of tasks. Each time you add a node to the swarm, the orchestrator creates a task and the scheduler assigns the task to the new node. Good candidates for global services are monitoring agents, anti-virus scanners, or other types of containers that you want to run on every node in the swarm.

The diagram below shows a three-service replica in yellow and a global service in gray.

[Image: global vs replicated services]
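
As a small illustration, the two deployment modes map to flags on docker service create; the image names are placeholders:

# Replicated: run exactly three identical tasks
$ docker service create --name web --replicas 3 nginx

# Global: run one task on every node, including nodes that join later
$ docker service create --name metrics-agent --mode global example/monitoring-agent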

Learn more

diff --git a/devdocs/docker/engine%2Fswarm%2Findex.html b/devdocs/docker/engine%2Fswarm%2Findex.html new file mode 100644 index 00000000..49fa8156 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Findex.html @@ -0,0 +1,10 @@ +

Swarm mode overview


To use Docker in swarm mode, install Docker. See installation instructions for all operating systems and platforms.

Current versions of Docker include swarm mode for natively managing a cluster of Docker Engines called a swarm. Use the Docker CLI to create a swarm, deploy application services to a swarm, and manage swarm behavior.

Docker Swarm mode is built into the Docker Engine. Do not confuse Docker Swarm mode with Docker Classic Swarm, which is no longer actively developed.
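
As a rough sketch of using the CLI to create a swarm, deploy a service, and inspect it (the service name and published port are illustrative):

# Turn the current Docker Engine into a single-node swarm
$ docker swarm init

# Deploy an application service to the swarm
$ docker service create --name web --replicas 2 --publish published=8080,target=80 nginx

# Manage and observe swarm behavior
$ docker node ls
$ docker service ls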

Feature highlights

What’s next?

Swarm mode key concepts and tutorial

Swarm mode CLI commands

Explore swarm mode CLI commands

diff --git a/devdocs/docker/engine%2Fswarm%2Fingress%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fingress%2Findex.html new file mode 100644 index 00000000..ffdaeb45 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fingress%2Findex.html @@ -0,0 +1,66 @@ +

Use swarm mode routing mesh


Docker Engine swarm mode makes it easy to publish ports for services to make them available to resources outside the swarm. All nodes participate in an ingress routing mesh. The routing mesh enables each node in the swarm to accept connections on published ports for any service running in the swarm, even if there’s no task running on the node. The routing mesh routes all incoming requests to published ports on available nodes to an active container.

To use the ingress network in the swarm, you need to have the following ports open between the swarm nodes before you enable swarm mode: port 7946 (TCP/UDP) for container network discovery, and port 4789 (UDP) for the container ingress network.

You must also open the published port between the swarm nodes and any external resources, such as an external load balancer, that require access to the port.

You can also bypass the routing mesh for a given service.

Publish a port for a service

Use the --publish flag to publish a port when you create a service. target is used to specify the port inside the container, and published is used to specify the port to bind on the routing mesh. If you leave off the published port, a random high-numbered port is bound for each service task. You need to inspect the task to determine the port.

$ docker service create \
+  --name <SERVICE-NAME> \
+  --publish published=<PUBLISHED-PORT>,target=<CONTAINER-PORT> \
+  <IMAGE>
+

Note: The older form of this syntax is a colon-separated string, where the published port is first and the target port is second, such as -p 8080:80. The new syntax is preferred because it is easier to read and allows more flexibility.

The <PUBLISHED-PORT> is the port where the swarm makes the service available. If you omit it, a random high-numbered port is bound. The <CONTAINER-PORT> is the port where the container listens. This parameter is required.

For example, the following command publishes port 80 in the nginx container to port 8080 for any node in the swarm:

$ docker service create \
+  --name my-web \
+  --publish published=8080,target=80 \
+  --replicas 2 \
+  nginx
+

When you access port 8080 on any node, Docker routes your request to an active container. On the swarm nodes themselves, port 8080 may not actually be bound, but the routing mesh knows how to route the traffic and prevents any port conflicts from happening.

The routing mesh listens on the published port for any IP address assigned to the node. For externally routable IP addresses, the port is available from outside the host. For all other IP addresses the access is only available from within the host.

[Image: service ingress]

You can publish a port for an existing service using the following command:

$ docker service update \
+  --publish-add published=<PUBLISHED-PORT>,target=<CONTAINER-PORT> \
+  <SERVICE>
+

You can use docker service inspect to view the service’s published port. For instance:

$ docker service inspect --format="{{json .Endpoint.Spec.Ports}}" my-web
+
+[{"Protocol":"tcp","TargetPort":80,"PublishedPort":8080}]
+

The output shows the <CONTAINER-PORT> (labeled TargetPort) from the containers and the <PUBLISHED-PORT> (labeled PublishedPort) where nodes listen for requests for the service.

Publish a port for TCP only or UDP only

By default, when you publish a port, it is a TCP port. You can specifically publish a UDP port instead of or in addition to a TCP port. When you publish both TCP and UDP ports, if you omit the protocol specifier, the port is published as a TCP port. If you use the longer syntax (recommended), set the protocol key to either tcp or udp.

TCP only

Long syntax:

$ docker service create --name dns-cache \
+  --publish published=53,target=53 \
+  dns-cache
+

Short syntax:

$ docker service create --name dns-cache \
+  -p 53:53 \
+  dns-cache
+

TCP and UDP

Long syntax:

$ docker service create --name dns-cache \
+  --publish published=53,target=53 \
+  --publish published=53,target=53,protocol=udp \
+  dns-cache
+

Short syntax:

$ docker service create --name dns-cache \
+  -p 53:53 \
+  -p 53:53/udp \
+  dns-cache
+

UDP only

Long syntax:

$ docker service create --name dns-cache \
+  --publish published=53,target=53,protocol=udp \
+  dns-cache
+

Short syntax:

$ docker service create --name dns-cache \
+  -p 53:53/udp \
+  dns-cache
+

Bypass the routing mesh

You can bypass the routing mesh, so that when you access the bound port on a given node, you are always accessing the instance of the service running on that node. This is referred to as host mode. There are a few things to keep in mind.

To bypass the routing mesh, you must use the long --publish syntax and set mode to host. If you omit the mode key or set it to ingress, the routing mesh is used. The following command creates a global service using host mode and bypassing the routing mesh.

$ docker service create --name dns-cache \
+  --publish published=53,target=53,protocol=udp,mode=host \
+  --mode global \
+  dns-cache
+

Configure an external load balancer

You can configure an external load balancer for swarm services, either in combination with the routing mesh or without using the routing mesh at all.

Using the routing mesh

You can configure an external load balancer to route requests to a swarm service. For example, you could configure HAProxy to balance requests to an nginx service published to port 8080.

[Image: ingress with external load balancer]

In this case, port 8080 must be open between the load balancer and the nodes in the swarm. The swarm nodes can reside on a private network that is accessible to the proxy server, but that is not publicly accessible.

You can configure the load balancer to balance requests between every node in the swarm even if there are no tasks scheduled on the node. For example, you could have the following HAProxy configuration in /etc/haproxy/haproxy.cfg:

global
+        log /dev/log    local0
+        log /dev/log    local1 notice
+...snip...
+
+# Configure HAProxy to listen on port 80
+frontend http_front
+   bind *:80
+   stats uri /haproxy?stats
+   default_backend http_back
+
+# Configure HAProxy to route requests to swarm nodes on port 8080
+backend http_back
+   balance roundrobin
+   server node1 192.168.99.100:8080 check
+   server node2 192.168.99.101:8080 check
+   server node3 192.168.99.102:8080 check
+

When you access the HAProxy load balancer on port 80, it forwards requests to nodes in the swarm. The swarm routing mesh routes the request to an active task. If, for any reason, the swarm scheduler dispatches tasks to different nodes, you don’t need to reconfigure the load balancer.

You can configure any type of load balancer to route requests to swarm nodes. To learn more about HAProxy, see the HAProxy documentation.

Without the routing mesh

To use an external load balancer without the routing mesh, set --endpoint-mode to dnsrr instead of the default value of vip. In this case, there is not a single virtual IP. Instead, Docker sets up DNS entries for the service such that a DNS query for the service name returns a list of IP addresses, and the client connects directly to one of these. You are responsible for providing the list of IP addresses and ports to your load balancer. See Configure service discovery.
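
A minimal sketch of a DNS round-robin service; the network and service names are hypothetical, and the lookup runs from a helper container attached to the same overlay network:

$ docker network create --driver overlay --attachable app-net

$ docker service create --name api \
  --network app-net \
  --endpoint-mode dnsrr \
  --replicas 3 \
  nginx

# With dnsrr, the service name resolves to the task IPs instead of a single virtual IP
$ docker run --rm --network app-net alpine nslookup api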

Learn more

diff --git a/devdocs/docker/engine%2Fswarm%2Fjoin-nodes%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fjoin-nodes%2Findex.html new file mode 100644 index 00000000..5ad31fa1 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fjoin-nodes%2Findex.html @@ -0,0 +1,36 @@ +

Join nodes to a swarm


When you first create a swarm, you place a single Docker Engine into swarm mode. To take full advantage of swarm mode you can add nodes to the swarm:

The Docker Engine joins the swarm depending on the join-token you provide to the docker swarm join command. The node only uses the token at join time. If you subsequently rotate the token, it doesn’t affect existing swarm nodes. Refer to Run Docker Engine in swarm mode.
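
For example, rotating the worker token only affects future joins; nodes that have already joined keep working:

# Invalidate the old worker token and generate a new one
$ docker swarm join-token --rotate worker

# Print the current join command for workers
$ docker swarm join-token worker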

Join as a worker node

To retrieve the join command including the join token for worker nodes, run the following command on a manager node:

$ docker swarm join-token worker
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
+    192.168.99.100:2377
+

Run the command from the output on the worker to join the swarm:

$ docker swarm join \
+  --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
+  192.168.99.100:2377
+
+This node joined a swarm as a worker.
+

The docker swarm join command does the following:

Join as a manager node

When you run docker swarm join and pass the manager token, the Docker Engine switches into swarm mode the same as for workers. Manager nodes also participate in the Raft consensus. The new nodes should be Reachable, but the existing manager remains the swarm Leader.

Docker recommends three or five manager nodes per cluster to implement high availability. Because swarm mode manager nodes share data using Raft, there must be an odd number of managers. The swarm can continue to function as long as a quorum of more than half of the manager nodes is available.

For more detail about swarm managers and administering a swarm, see Administer and maintain a swarm of Docker Engines.

To retrieve the join command including the join token for manager nodes, run the following command on a manager node:

$ docker swarm join-token manager
+
+To add a manager to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-61ztec5kyafptydic6jfc1i33t37flcl4nuipzcusor96k7kby-5vy9t8u35tuqm7vh67lrz9xp6 \
+    192.168.99.100:2377
+

Run the command from the output on the new manager node to join it to the swarm:

$ docker swarm join \
+  --token SWMTKN-1-61ztec5kyafptydic6jfc1i33t37flcl4nuipzcusor96k7kby-5vy9t8u35tuqm7vh67lrz9xp6 \
+  192.168.99.100:2377
+
+This node joined a swarm as a manager.
+

Learn More

diff --git a/devdocs/docker/engine%2Fswarm%2Fkey-concepts%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fkey-concepts%2Findex.html new file mode 100644 index 00000000..3be49102 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fkey-concepts%2Findex.html @@ -0,0 +1,10 @@ +

Swarm mode key concepts


This topic introduces some of the concepts unique to the cluster management and orchestration features of Docker Engine 1.12.

What is a swarm?

The cluster management and orchestration features embedded in the Docker Engine are built using swarmkit. Swarmkit is a separate project which implements Docker’s orchestration layer and is used directly within Docker.

A swarm consists of multiple Docker hosts which run in swarm mode and act as managers (to manage membership and delegation) and workers (which run swarm services). A given Docker host can be a manager, a worker, or perform both roles. When you create a service, you define its optimal state (number of replicas, network and storage resources available to it, ports the service exposes to the outside world, and more). Docker works to maintain that desired state. For instance, if a worker node becomes unavailable, Docker schedules that node’s tasks on other nodes. A task is a running container which is part of a swarm service and managed by a swarm manager, as opposed to a standalone container.

One of the key advantages of swarm services over standalone containers is that you can modify a service’s configuration, including the networks and volumes it is connected to, without the need to manually restart the service. Docker will update the configuration, stop the service tasks with the out of date configuration, and create new ones matching the desired configuration.

When Docker is running in swarm mode, you can still run standalone containers on any of the Docker hosts participating in the swarm, as well as swarm services. A key difference between standalone containers and swarm services is that only swarm managers can manage a swarm, while standalone containers can be started on any daemon. Docker daemons can participate in a swarm as managers, workers, or both.

In the same way that you can use Docker Compose to define and run containers, you can define and run Swarm service stacks.

Keep reading for details about concepts relating to Docker swarm services, including nodes, services, tasks, and load balancing.

Nodes

A node is an instance of the Docker engine participating in the swarm. You can also think of this as a Docker node. You can run one or more nodes on a single physical computer or cloud server, but production swarm deployments typically include Docker nodes distributed across multiple physical and cloud machines.

To deploy your application to a swarm, you submit a service definition to a manager node. The manager node dispatches units of work called tasks to worker nodes.

Manager nodes also perform the orchestration and cluster management functions required to maintain the desired state of the swarm. Manager nodes elect a single leader to conduct orchestration tasks.

Worker nodes receive and execute tasks dispatched from manager nodes. By default manager nodes also run services as worker nodes, but you can configure them to run manager tasks exclusively and be manager-only nodes. An agent runs on each worker node and reports on the tasks assigned to it. The worker node notifies the manager node of the current state of its assigned tasks so that the manager can maintain the desired state of each worker.

Services and tasks

A service is the definition of the tasks to execute on the manager or worker nodes. It is the central structure of the swarm system and the primary root of user interaction with the swarm.

When you create a service, you specify which container image to use and which commands to execute inside running containers.

In the replicated services model, the swarm manager distributes a specific number of replica tasks among the nodes based upon the scale you set in the desired state.

For global services, the swarm runs one task for the service on every available node in the cluster.

A task carries a Docker container and the commands to run inside the container. It is the atomic scheduling unit of swarm. Manager nodes assign tasks to worker nodes according to the number of replicas set in the service scale. Once a task is assigned to a node, it cannot move to another node. It can only run on the assigned node or fail.

Load balancing

The swarm manager uses ingress load balancing to expose the services you want to make available externally to the swarm. The swarm manager can automatically assign the service a PublishedPort or you can configure a PublishedPort for the service. You can specify any unused port. If you do not specify a port, the swarm manager assigns the service a port in the 30000-32767 range.
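
A hedged sketch of letting the swarm pick the port; the service name is illustrative, and the template path for the assigned port is an assumption about the docker service inspect output:

# Publish the container's port 80 without choosing a published port
$ docker service create --name web --publish target=80 nginx

# The assigned PublishedPort (in the 30000-32767 range) appears in the endpoint info
$ docker service inspect --format '{{json .Endpoint.Ports}}' web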

External components, such as cloud load balancers, can access the service on the PublishedPort of any node in the cluster whether or not the node is currently running the task for the service. All nodes in the swarm route ingress connections to a running task instance.

Swarm mode has an internal DNS component that automatically assigns each service in the swarm a DNS entry. The swarm manager uses internal load balancing to distribute requests among services within the cluster based upon the DNS name of the service.

What’s next?

diff --git a/devdocs/docker/engine%2Fswarm%2Fmanage-nodes%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fmanage-nodes%2Findex.html new file mode 100644 index 00000000..41b8ef5a --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fmanage-nodes%2Findex.html @@ -0,0 +1,64 @@ +

Manage nodes in a swarm


As part of the swarm management lifecycle, you may need to view or update a node as follows:

List nodes

To view a list of nodes in the swarm run docker node ls from a manager node:

$ docker node ls
+
+ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
+46aqrk4e473hjbt745z53cr3t    node-5    Ready   Active        Reachable
+61pi3d91s0w3b90ijw3deeb2q    node-4    Ready   Active        Reachable
+a5b2m3oghd48m8eu391pefq5u    node-3    Ready   Active
+e7p8btxeu3ioshyuj6lxiv6g0    node-2    Ready   Active
+ehkv3bcimagdese79dn78otj5 *  node-1    Ready   Active        Leader
+

The AVAILABILITY column shows whether or not the scheduler can assign tasks to the node:

The MANAGER STATUS column shows node participation in the Raft consensus:

For more information on swarm administration refer to the Swarm administration guide.

Inspect an individual node

You can run docker node inspect <NODE-ID> on a manager node to view the details for an individual node. The output defaults to JSON format, but you can pass the --pretty flag to print the results in human-readable format. For example:

$ docker node inspect self --pretty
+
+ID:                     ehkv3bcimagdese79dn78otj5
+Hostname:               node-1
+Joined at:              2016-06-16 22:52:44.9910662 +0000 utc
+Status:
+ State:                 Ready
+ Availability:          Active
+Manager Status:
+ Address:               172.17.0.2:2377
+ Raft Status:           Reachable
+ Leader:                Yes
+Platform:
+ Operating System:      linux
+ Architecture:          x86_64
+Resources:
+ CPUs:                  2
+ Memory:                1.954 GiB
+Plugins:
+  Network:              overlay, host, bridge, overlay, null
+  Volume:               local
+Engine Version:         1.12.0-dev
+

Update a node

You can modify node attributes as follows:

Change node availability

Changing node availability lets you:

For example, to change a manager node to Drain availability:

$ docker node update --availability drain node-1
+
+node-1
+

See list nodes for descriptions of the different availability options.

Add or remove label metadata

Node labels provide a flexible method of node organization. You can also use node labels in service constraints. Apply constraints when you create a service to limit the nodes where the scheduler assigns tasks for the service.

Run docker node update --label-add on a manager node to add label metadata to a node. The --label-add flag supports either a <key> or a <key>=<value> pair.

Pass the --label-add flag once for each node label you want to add:

$ docker node update --label-add foo --label-add bar=baz node-1
+
+node-1
+

The labels you set for nodes using docker node update apply only to the node entity within the swarm. Do not confuse them with the docker daemon labels for dockerd.

Therefore, node labels can be used to limit critical tasks to nodes that meet certain requirements. For example, schedule only on machines where special workloads should be run, such as machines that meet PCI-SS compliance.

A compromised worker could not compromise these special workloads because it cannot change node labels.

Engine labels, however, are still useful because some features that do not affect secure orchestration of containers might be better off set in a decentralized manner. For instance, an engine could have a label to indicate that it has a certain type of disk device, which may not be relevant to security directly. These labels are more easily “trusted” by the swarm orchestrator.

Refer to the docker service create CLI reference for more information about service constraints.
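
For instance, a hedged sketch of pairing a node label with a service constraint; the label, node, and image names are hypothetical:

# Label the nodes that meet the compliance requirement
$ docker node update --label-add pci_compliant=true node-1

# Only nodes carrying the label are eligible to run the service's tasks
$ docker service create --name payments \
  --constraint 'node.labels.pci_compliant==true' \
  example/payments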

Promote or demote a node

You can promote a worker node to the manager role. This is useful when a manager node becomes unavailable or if you want to take a manager offline for maintenance. Similarly, you can demote a manager node to the worker role.

Note: Regardless of your reason to promote or demote a node, you must always maintain a quorum of manager nodes in the swarm. For more information refer to the Swarm administration guide.

To promote a node or set of nodes, run docker node promote from a manager node:

$ docker node promote node-3 node-2
+
+Node node-3 promoted to a manager in the swarm.
+Node node-2 promoted to a manager in the swarm.
+

To demote a node or set of nodes, run docker node demote from a manager node:

$ docker node demote node-3 node-2
+
+Manager node-3 demoted in the swarm.
+Manager node-2 demoted in the swarm.
+

docker node promote and docker node demote are convenience commands for docker node update --role manager and docker node update --role worker respectively.

Install plugins on swarm nodes

If your swarm service relies on one or more plugins, these plugins need to be available on every node where the service could potentially be deployed. You can manually install the plugin on each node or script the installation. You can also deploy the plugin in a similar way as a global service using the Docker API, by specifying a PluginSpec instead of a ContainerSpec.

Note

There is currently no way to deploy a plugin to a swarm using the Docker CLI or Docker Compose. In addition, it is not possible to install plugins from a private repository.

The PluginSpec is defined by the plugin developer. To add the plugin to all Docker nodes, use the service/create API, passing the PluginSpec JSON defined in the TaskTemplate.

Leave the swarm

Run the docker swarm leave command on a node to remove it from the swarm.

For example to leave the swarm on a worker node:

$ docker swarm leave
+
+Node left the swarm.
+

When a node leaves the swarm, the Docker Engine stops running in swarm mode. The orchestrator no longer schedules tasks to the node.

If the node is a manager node, you receive a warning about maintaining the quorum. To override the warning, pass the --force flag. If the last manager node leaves the swarm, the swarm becomes unavailable, requiring you to take disaster recovery measures.

For information about maintaining a quorum and disaster recovery, refer to the Swarm administration guide.

After a node leaves the swarm, you can run the docker node rm command on a manager node to remove the node from the node list.

For instance:

$ docker node rm node-2
+

Learn more

diff --git a/devdocs/docker/engine%2Fswarm%2Fraft%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fraft%2Findex.html new file mode 100644 index 00000000..8b672c37 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fraft%2Findex.html @@ -0,0 +1,13 @@ +

Raft consensus in swarm mode


When the Docker Engine runs in swarm mode, manager nodes implement the Raft Consensus Algorithm to manage the global cluster state.

Docker swarm mode uses a consensus algorithm to make sure that all the manager nodes that are in charge of managing and scheduling tasks in the cluster store the same consistent state.

Having the same consistent state across the cluster means that, in case of a failure, any Manager node can pick up the tasks and restore the services to a stable state. For example, if the Leader Manager, which is responsible for scheduling tasks in the cluster, dies unexpectedly, any other Manager can pick up the task of scheduling and re-balance tasks to match the desired state.

Systems that use consensus algorithms to replicate logs in a distributed system require special care. They ensure that the cluster state stays consistent in the presence of failures by requiring a majority of nodes to agree on values.

Raft tolerates up to (N-1)/2 failures and requires a majority or quorum of (N/2)+1 members to agree on values proposed to the cluster. This means that in a cluster of 5 Managers running Raft, if 3 nodes are unavailable, the system cannot process any more requests to schedule additional tasks. The existing tasks keep running but the scheduler cannot rebalance tasks to cope with failures if the manager set is not healthy.
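
Working the formulas through a few common cluster sizes:

Managers (N)    Quorum ((N/2)+1)    Failures tolerated ((N-1)/2)
3               2                   1
5               3                   2
7               4                   3

Note that an even number of managers adds no fault tolerance over the next lower odd number: a 4-manager cluster needs a quorum of 3 and still tolerates only 1 failure, which is why an odd number of managers is recommended.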

The implementation of the consensus algorithm in swarm mode means it features the properties inherent to distributed systems:

diff --git a/devdocs/docker/engine%2Fswarm%2Fsecrets%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fsecrets%2Findex.html new file mode 100644 index 00000000..d91a6558 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fsecrets%2Findex.html @@ -0,0 +1,333 @@ +

Manage sensitive data with Docker secrets


About secrets

In terms of Docker Swarm services, a secret is a blob of data, such as a password, SSH private key, SSL certificate, or another piece of data that should not be transmitted over a network or stored unencrypted in a Dockerfile or in your application’s source code. You can use Docker secrets to centrally manage this data and securely transmit it to only those containers that need access to it. Secrets are encrypted during transit and at rest in a Docker swarm. A given secret is only accessible to those services which have been granted explicit access to it, and only while those service tasks are running.

You can use secrets to manage any sensitive data which a container needs at runtime but you don’t want to store in the image or in source control, such as:

Note: Docker secrets are only available to swarm services, not to standalone containers. To use this feature, consider adapting your container to run as a service. Stateful containers can typically run with a scale of 1 without changing the container code.

Another use case for using secrets is to provide a layer of abstraction between the container and a set of credentials. Consider a scenario where you have separate development, test, and production environments for your application. Each of these environments can have different credentials, stored in the development, test, and production swarms with the same secret name. Your containers only need to know the name of the secret to function in all three environments.

You can also use secrets to manage non-sensitive data, such as configuration files. However, Docker supports the use of configs for storing non-sensitive data. Configs are mounted into the container’s filesystem directly, without the use of a RAM disk.

Windows support

Docker includes support for secrets on Windows containers. Where there are differences in the implementations, they are called out in the examples below. Keep the following notable differences in mind:

How Docker manages secrets

When you add a secret to the swarm, Docker sends the secret to the swarm manager over a mutual TLS connection. The secret is stored in the Raft log, which is encrypted. The entire Raft log is replicated across the other managers, ensuring the same high availability guarantees for secrets as for the rest of the swarm management data.

When you grant a newly-created or running service access to a secret, the decrypted secret is mounted into the container in an in-memory filesystem. The location of the mount point within the container defaults to /run/secrets/<secret_name> in Linux containers, or C:\ProgramData\Docker\secrets in Windows containers. You can also specify a custom location.

You can update a service to grant it access to additional secrets or revoke its access to a given secret at any time.

A node only has access to (encrypted) secrets if the node is a swarm manager or if it is running service tasks which have been granted access to the secret. When a container task stops running, the decrypted secrets shared to it are unmounted from the in-memory filesystem for that container and flushed from the node’s memory.

If a node loses connectivity to the swarm while it is running a task container with access to a secret, the task container still has access to its secrets, but cannot receive updates until the node reconnects to the swarm.

You can add or inspect an individual secret at any time, or list all secrets. You cannot remove a secret that a running service is using. See Rotate a secret for a way to remove a secret without disrupting running services.

To update or roll back secrets more easily, consider adding a version number or date to the secret name. This is made easier by the ability to control the mount point of the secret within a given container.
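
A hedged sketch of that pattern, with hypothetical secret and service names; the fixed target keeps the path inside the container stable across versions:

# Store the new value under a versioned name
$ printf "new-password" | docker secret create db_password.v2 -

# Swap the secret on the service; the mount point stays /run/secrets/db_password
$ docker service update \
  --secret-rm db_password.v1 \
  --secret-add source=db_password.v2,target=db_password \
  myservice

# Once no running service uses it, the old version can be removed
$ docker secret rm db_password.v1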

Read more about docker secret commands

Use these links to read about specific commands, or continue to the example about using secrets with a service.

Examples

This section includes three graduated examples which illustrate how to use Docker secrets. The images used in these examples have been updated to make it easier to use Docker secrets. To find out how to modify your own images in a similar way, see Build support for Docker Secrets into your images.

Note: These examples use a single-Engine swarm and unscaled services for simplicity. The examples use Linux containers, but Windows containers also support secrets. See Windows support.

Defining and using secrets in compose files

Both the docker-compose and docker stack commands support defining secrets in a compose file. See the Compose file reference for details.
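
For reference, a minimal hedged sketch of such a compose file; the names are illustrative. The file: form creates the secret from a local file at deploy time, while external: true references a secret created beforehand with docker secret create:

version: "3.8"

services:
  web:
    image: nginx:alpine
    secrets:
      - site_cert

secrets:
  site_cert:
    file: ./site.crt
  # A pre-created secret would instead be declared as:
  # db_password:
  #   external: true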

Simple example: Get started with secrets

This simple example shows how secrets work in just a few commands. For a real-world example, continue to Intermediate example: Use secrets with a Nginx service.

  1. Add a secret to Docker. The docker secret create command reads standard input because the last argument, which represents the file to read the secret from, is set to -.

    $ printf "This is a secret" | docker secret create my_secret_data -
    +
  2. Create a redis service and grant it access to the secret. By default, the container can access the secret at /run/secrets/<secret_name>, but you can customize the file name on the container using the target option.

    $ docker service  create --name redis --secret my_secret_data redis:alpine
    +
  3. Verify that the task is running without issues using docker service ps. If everything is working, the output looks similar to this:

    $ docker service ps redis
    +
    +ID            NAME     IMAGE         NODE              DESIRED STATE  CURRENT STATE          ERROR  PORTS
    +bkna6bpn8r1a  redis.1  redis:alpine  ip-172-31-46-109  Running        Running 8 seconds ago  
    +

    If there were an error, and the task were failing and repeatedly restarting, you would see something like this:

    $ docker service ps redis
    +
    +NAME                      IMAGE         NODE  DESIRED STATE  CURRENT STATE          ERROR                      PORTS
    +redis.1.siftice35gla      redis:alpine  moby  Running        Running 4 seconds ago                             
    + \_ redis.1.whum5b7gu13e  redis:alpine  moby  Shutdown       Failed 20 seconds ago      "task: non-zero exit (1)"  
    + \_ redis.1.2s6yorvd9zow  redis:alpine  moby  Shutdown       Failed 56 seconds ago      "task: non-zero exit (1)"  
    + \_ redis.1.ulfzrcyaf6pg  redis:alpine  moby  Shutdown       Failed about a minute ago  "task: non-zero exit (1)"  
    + \_ redis.1.wrny5v4xyps6  redis:alpine  moby  Shutdown       Failed 2 minutes ago       "task: non-zero exit (1)"
    +
  4. Get the ID of the redis service task container using docker ps, so that you can use docker container exec to connect to the container and read the contents of the secret data file, which defaults to being readable by all and has the same name as the name of the secret. The first command below illustrates how to find the container ID, and the second and third commands use command substitution to do this automatically.

    $ docker ps --filter name=redis -q
    +
    +5cb1c2348a59
    +
    +$ docker container exec $(docker ps --filter name=redis -q) ls -l /run/secrets
    +
    +total 4
    +-r--r--r--    1 root     root            17 Dec 13 22:48 my_secret_data
    +
    +$ docker container exec $(docker ps --filter name=redis -q) cat /run/secrets/my_secret_data
    +
    +This is a secret
    +
  5. Verify that the secret is not available if you commit the container.

    $ docker commit $(docker ps --filter name=redis -q) committed_redis
    +
    +$ docker run --rm -it committed_redis cat /run/secrets/my_secret_data
    +
    +cat: can't open '/run/secrets/my_secret_data': No such file or directory
    +
  6. Try removing the secret. The removal fails because the redis service is running and has access to the secret.

    $ docker secret ls
    +
    +ID                          NAME                CREATED             UPDATED
    +wwwrxza8sxy025bas86593fqs   my_secret_data      4 hours ago         4 hours ago
    +
    +
    +$ docker secret rm my_secret_data
    +
    +Error response from daemon: rpc error: code = 3 desc = secret
    +'my_secret_data' is in use by the following service: redis
    +
  7. Remove access to the secret from the running redis service by updating the service.

    $ docker service update --secret-rm my_secret_data redis
    +
  8. Repeat steps 3 and 4 again, verifying that the service no longer has access to the secret. The container ID is different, because the service update command redeploys the service.

    $ docker container exec -it $(docker ps --filter name=redis -q) cat /run/secrets/my_secret_data
    +
    +cat: can't open '/run/secrets/my_secret_data': No such file or directory
    +
  9. Stop and remove the service, and remove the secret from Docker.

    $ docker service rm redis
    +
    +$ docker secret rm my_secret_data
    +

Simple example: Use secrets in a Windows service

This is a very simple example which shows how to use secrets with a Microsoft IIS service running on Docker for Windows running Windows containers on Microsoft Windows 10. It is a naive example that stores the webpage in a secret.

This example assumes that you have PowerShell installed.

  1. Save the following into a new file index.html.

    <html lang="en">
    +  <head><title>Hello Docker</title></head>
    +  <body>
    +    <p>Hello Docker! You have deployed a HTML page.</p>
    +  </body>
    +</html>
    +
  2. If you have not already done so, initialize or join the swarm.

    docker swarm init
    +
  3. Save the index.html file as a swarm secret named homepage.

    docker secret create homepage index.html
    +
  4. Create an IIS service and grant it access to the homepage secret.

    docker service create `
    +    --name my-iis `
    +    --publish published=8000,target=8000 `
    +    --secret src=homepage,target="\inetpub\wwwroot\index.html" `
    +    microsoft/iis:nanoserver
    +

    Note: There is technically no reason to use secrets for this example; configs are a better fit. This example is for illustration only.

  5. Access the IIS service at http://localhost:8000/. It should serve the HTML content from the first step.

  6. Remove the service and the secret.

    docker service rm my-iis
    +docker secret rm homepage
    +docker image remove secret-test
    +

Intermediate example: Use secrets with a Nginx service

This example is divided into two parts. The first part is all about generating the site certificate and does not directly involve Docker secrets at all, but it sets up the second part, where you store and use the site certificate and Nginx configuration as secrets.

Generate the site certificate

Generate a root CA and TLS certificate and key for your site. For production sites, you may want to use a service such as Let’s Encrypt to generate the TLS certificate and key, but this example uses command-line tools. This step is a little complicated, but is only a set-up step so that you have something to store as a Docker secret. If you want to skip these sub-steps, you can use Let’s Encrypt to generate the site key and certificate, name the files site.key and site.crt, and skip to Configure the Nginx container.

  1. Generate a root key.

    $ openssl genrsa -out "root-ca.key" 4096
    +
  2. Generate a CSR using the root key.

    $ openssl req \
    +          -new -key "root-ca.key" \
    +          -out "root-ca.csr" -sha256 \
    +          -subj '/C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA'
    +
  3. Configure the root CA. Edit a new file called root-ca.cnf and paste the following contents into it. This constrains the root CA to signing leaf certificates and not intermediate CAs.

    [root_ca]
    +basicConstraints = critical,CA:TRUE,pathlen:1
    +keyUsage = critical, nonRepudiation, cRLSign, keyCertSign
    +subjectKeyIdentifier=hash
    +
  4. Sign the certificate.

    $ openssl x509 -req  -days 3650  -in "root-ca.csr" \
    +               -signkey "root-ca.key" -sha256 -out "root-ca.crt" \
    +               -extfile "root-ca.cnf" -extensions \
    +               root_ca
    +
  5. Generate the site key.

    $ openssl genrsa -out "site.key" 4096
    +
  6. Generate the site certificate and sign it with the site key.

    $ openssl req -new -key "site.key" -out "site.csr" -sha256 \
    +          -subj '/C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost'
    +
  7. Configure the site certificate. Edit a new file called site.cnf and paste the following contents into it. This constrains the site certificate so that it can only be used to authenticate a server and can’t be used to sign certificates.

    [server]
    +authorityKeyIdentifier=keyid,issuer
    +basicConstraints = critical,CA:FALSE
    +extendedKeyUsage=serverAuth
    +keyUsage = critical, digitalSignature, keyEncipherment
    +subjectAltName = DNS:localhost, IP:127.0.0.1
    +subjectKeyIdentifier=hash
    +
  8. Sign the site certificate.

    $ openssl x509 -req -days 750 -in "site.csr" -sha256 \
    +    -CA "root-ca.crt" -CAkey "root-ca.key"  -CAcreateserial \
    +    -out "site.crt" -extfile "site.cnf" -extensions server
    +
  9. The site.csr and site.cnf files are not needed by the Nginx service, but you need them if you want to generate a new site certificate. Protect the root-ca.key file.

Configure the Nginx container

  1. Produce a very basic Nginx configuration that serves static files over HTTPS. The TLS certificate and key are stored as Docker secrets so that they can be rotated easily.

    In the current directory, create a new file called site.conf with the following contents:

    server {
    +    listen                443 ssl;
    +    server_name           localhost;
    +    ssl_certificate       /run/secrets/site.crt;
    +    ssl_certificate_key   /run/secrets/site.key;
    +
    +    location / {
    +        root   /usr/share/nginx/html;
    +        index  index.html index.htm;
    +    }
    +}
    +
  2. Create three secrets, representing the key, the certificate, and the site.conf. You can store any file as a secret as long as it is smaller than 500 KB. This allows you to decouple the key, certificate, and configuration from the services that use them. In each of these commands, the last argument represents the path to the file to read the secret from on the host machine’s filesystem. In these examples, the secret name and the file name are the same.

    $ docker secret create site.key site.key
    +
    +$ docker secret create site.crt site.crt
    +
    +$ docker secret create site.conf site.conf
    +
    $ docker secret ls
    +
    +ID                          NAME                  CREATED             UPDATED
    +2hvoi9mnnaof7olr3z5g3g7fp   site.key       58 seconds ago      58 seconds ago
    +aya1dh363719pkiuoldpter4b   site.crt       24 seconds ago      24 seconds ago
    +zoa5df26f7vpcoz42qf2csth8   site.conf      11 seconds ago      11 seconds ago
    +
  3. Create a service that runs Nginx and has access to the three secrets. The last part of the docker service create command creates a symbolic link from the location of the site.conf secret to /etc/nginx/conf.d/, where Nginx looks for extra configuration files. This step happens before Nginx actually starts, so you don’t need to rebuild your image if you change the Nginx configuration.

    Note: Normally you would create a Dockerfile which copies the site.conf into place, build the image, and run a container using your custom image. This example does not require a custom image. It puts the site.conf into place and runs the container all in one step.

    Secrets are located within the /run/secrets/ directory in the container by default, which may require extra steps in the container to make the secret available in a different path. The example below creates a symbolic link to the true location of the site.conf file so that Nginx can read it:

    $ docker service create \
    +     --name nginx \
    +     --secret site.key \
    +     --secret site.crt \
    +     --secret site.conf \
    +     --publish published=3000,target=443 \
    +     nginx:latest \
    +     sh -c "ln -s /run/secrets/site.conf /etc/nginx/conf.d/site.conf && exec nginx -g 'daemon off;'"
    +

    Instead of creating symlinks, secrets allow you to specify a custom location using the target option. The example below illustrates how the site.conf secret is made available at /etc/nginx/conf.d/site.conf inside the container without the use of symbolic links:

    $ docker service create \
    +     --name nginx \
    +     --secret site.key \
    +     --secret site.crt \
    +     --secret source=site.conf,target=/etc/nginx/conf.d/site.conf \
    +     --publish published=3000,target=443 \
    +     nginx:latest \
    +     sh -c "exec nginx -g 'daemon off;'"
    +

    The site.key and site.crt secrets use the short-hand syntax, without a custom target location set. The short syntax mounts the secrets in /run/secrets/ with the same name as the secret. Within the running containers, the following three files now exist:

    • /run/secrets/site.key
    • /run/secrets/site.crt
    • /etc/nginx/conf.d/site.conf
  4. Verify that the Nginx service is running.

    $ docker service ls
    +
    +ID            NAME   MODE        REPLICAS  IMAGE
    +zeskcec62q24  nginx  replicated  1/1       nginx:latest
    +
    +$ docker service ps nginx
    +
    +NAME                  IMAGE         NODE  DESIRED STATE  CURRENT STATE          ERROR  PORTS
    +nginx.1.9ls3yo9ugcls  nginx:latest  moby  Running        Running 3 minutes ago
    +
  5. Verify that the service is operational: you can reach the Nginx server, and the correct TLS certificate is being used.

    $ curl --cacert root-ca.crt https://localhost:3000
    +
    +<!DOCTYPE html>
    +<html>
    +<head>
    +<title>Welcome to nginx!</title>
    +<style>
    +    body {
    +        width: 35em;
    +        margin: 0 auto;
    +        font-family: Tahoma, Verdana, Arial, sans-serif;
    +    }
    +</style>
    +</head>
    +<body>
    +<h1>Welcome to nginx!</h1>
    +<p>If you see this page, the nginx web server is successfully installed and
    +working. Further configuration is required.</p>
    +
    +<p>For online documentation and support please refer to
    +<a href="https://nginx.org">nginx.org</a>.<br/>
    +Commercial support is available at
    +<a href="https://www.nginx.com">nginx.com</a>.</p>
    +
    +<p><em>Thank you for using nginx.</em></p>
    +</body>
    +</html>
    +
    $ openssl s_client -connect localhost:3000 -CAfile root-ca.crt
    +
    +CONNECTED(00000003)
    +depth=1 /C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA
    +verify return:1
    +depth=0 /C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost
    +verify return:1
    +---
    +Certificate chain
    + 0 s:/C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost
    +   i:/C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA
    +---
    +Server certificate
    +-----BEGIN CERTIFICATE-----
    +…
    +-----END CERTIFICATE-----
    +subject=/C=US/ST=CA/L=San Francisco/O=Docker/CN=localhost
    +issuer=/C=US/ST=CA/L=San Francisco/O=Docker/CN=Swarm Secret Example CA
    +---
    +No client certificate CA names sent
    +---
    +SSL handshake has read 1663 bytes and written 712 bytes
    +---
    +New, TLSv1/SSLv3, Cipher is AES256-SHA
    +Server public key is 4096 bit
    +Secure Renegotiation IS supported
    +Compression: NONE
    +Expansion: NONE
    +SSL-Session:
    +    Protocol  : TLSv1
    +    Cipher    : AES256-SHA
    +    Session-ID: A1A8BF35549C5715648A12FD7B7E3D861539316B03440187D9DA6C2E48822853
    +    Session-ID-ctx:
    +    Master-Key: F39D1B12274BA16D3A906F390A61438221E381952E9E1E05D3DD784F0135FB81353DA38C6D5C021CB926E844DFC49FC4
    +    Key-Arg   : None
    +    Start Time: 1481685096
    +    Timeout   : 300 (sec)
    +    Verify return code: 0 (ok)
    +
  6. To clean up after running this example, remove the nginx service and the stored secrets.

    $ docker service rm nginx
    +
    +$ docker secret rm site.crt site.key site.conf
    +

Advanced example: Use secrets with a WordPress service

In this example, you create a single-node MySQL service with a custom root password, add the credentials as secrets, and create a single-node WordPress service which uses these credentials to connect to MySQL. The next example builds on this one and shows you how to rotate the MySQL password and update the services so that the WordPress service can still connect to MySQL.

This example illustrates some techniques to use Docker secrets to avoid saving sensitive credentials within your image or passing them directly on the command line.

Note: This example uses a single-Engine swarm for simplicity, and uses a single-node MySQL service because a single MySQL server instance cannot be scaled by simply using a replicated service, and setting up a MySQL cluster is beyond the scope of this example.

Also, changing a MySQL root passphrase isn’t as simple as changing a file on disk. You must use a query or a mysqladmin command to change the password in MySQL.

  1. Generate a random alphanumeric password for MySQL and store it as a Docker secret with the name mysql_password using the docker secret create command. To make the password shorter or longer, adjust the last argument of the openssl command. This is just one way to create a relatively random password. You can use another command to generate the password if you choose.

    Note: After you create a secret, you cannot update it. You can only remove and re-create it, and you cannot remove a secret that a service is using. However, you can grant or revoke a running service’s access to secrets using docker service update. If you need the ability to update a secret, consider adding a version component to the secret name, so that you can later add a new version, update the service to use it, then remove the old version.

    The last argument is set to -, which indicates that the input is read from standard input.

    $ openssl rand -base64 20 | docker secret create mysql_password -
    +
    +l1vinzevzhj4goakjap5ya409
    +

    The value returned is not the password, but the ID of the secret. In the remainder of this tutorial, the ID output is omitted.

    Generate a second secret for the MySQL root user. This secret isn’t shared with the WordPress service created later. It’s only needed to bootstrap the mysql service.

    $ openssl rand -base64 20 | docker secret create mysql_root_password -
    +

    List the secrets managed by Docker using docker secret ls:

    $ docker secret ls
    +
    +ID                          NAME                  CREATED             UPDATED
    +l1vinzevzhj4goakjap5ya409   mysql_password        41 seconds ago      41 seconds ago
    +yvsczlx9votfw3l0nz5rlidig   mysql_root_password   12 seconds ago      12 seconds ago
    +

    The secrets are stored in the encrypted Raft logs for the swarm.

  2. Create a user-defined overlay network which is used for communication between the MySQL and WordPress services. There is no need to expose the MySQL service to any external host or container.

    $ docker network create -d overlay mysql_private
    +
  3. Create the MySQL service. The MySQL service has the following characteristics:

    • Because the scale is set to 1, only a single MySQL task runs. Load-balancing MySQL is left as an exercise to the reader and involves more than just scaling the service.
    • Only reachable by other containers on the mysql_private network.
    • Uses the volume mydata to store the MySQL data, so that it persists across restarts of the mysql service.
    • The secrets are each mounted in a tmpfs filesystem at /run/secrets/mysql_password and /run/secrets/mysql_root_password. They are never exposed as environment variables, nor can they be committed to an image if the docker commit command is run. The mysql_password secret is the one used by the non-privileged WordPress container to connect to MySQL.
    • Sets the environment variables MYSQL_PASSWORD_FILE and MYSQL_ROOT_PASSWORD_FILE to point to the files /run/secrets/mysql_password and /run/secrets/mysql_root_password. The mysql image reads the password strings from those files when initializing the system database for the first time. Afterward, the passwords are stored in the MySQL system database itself.
    • Sets environment variables MYSQL_USER and MYSQL_DATABASE. A new database called wordpress is created when the container starts, and the wordpress user has full permissions for this database only. This user cannot create or drop databases or change the MySQL configuration.

      $ docker service create \
      +     --name mysql \
      +     --replicas 1 \
      +     --network mysql_private \
      +     --mount type=volume,source=mydata,destination=/var/lib/mysql \
      +     --secret source=mysql_root_password,target=mysql_root_password \
      +     --secret source=mysql_password,target=mysql_password \
      +     -e MYSQL_ROOT_PASSWORD_FILE="/run/secrets/mysql_root_password" \
      +     -e MYSQL_PASSWORD_FILE="/run/secrets/mysql_password" \
      +     -e MYSQL_USER="wordpress" \
      +     -e MYSQL_DATABASE="wordpress" \
      +     mysql:latest
      +
  4. Verify that the mysql container is running using the docker service ls command.

    $ docker service ls
    +
    +ID            NAME   MODE        REPLICAS  IMAGE
    +wvnh0siktqr3  mysql  replicated  1/1       mysql:latest
    +

    At this point, you could actually revoke the mysql service’s access to the mysql_password and mysql_root_password secrets because the passwords have been saved in the MySQL system database. Don’t do that for now, because we use them later to facilitate rotating the MySQL password.

  5. Now that MySQL is set up, create a WordPress service that connects to the MySQL service. The WordPress service has the following characteristics:

    • Because the scale is set to 1, only a single WordPress task runs. Load-balancing WordPress is left as an exercise to the reader, because of limitations with storing WordPress session data on the container filesystem.
    • Exposes WordPress on port 30000 of the host machine, so that you can access it from external hosts. You can expose port 80 instead if you do not have a web server running on port 80 of the host machine.
    • Connects to the mysql_private network so it can communicate with the mysql container, and also publishes port 80 to port 30000 on all swarm nodes.
    • Has access to the mysql_password secret, but specifies a different target file name within the container. The WordPress container uses the mount point /run/secrets/wp_db_password. Also specifies that the secret is not group-or-world-readable, by setting the mode to 0400.
    • Sets the environment variable WORDPRESS_DB_PASSWORD_FILE to the file path where the secret is mounted. The WordPress service reads the MySQL password string from that file and adds it to the wp-config.php configuration file.
    • Connects to the MySQL container using the username wordpress and the password in /run/secrets/wp_db_password and creates the wordpress database if it does not yet exist.
    • Stores its data, such as themes and plugins, in a volume called wpdata so these files persist when the service restarts.
    $ docker service create \
    +     --name wordpress \
    +     --replicas 1 \
    +     --network mysql_private \
    +     --publish published=30000,target=80 \
    +     --mount type=volume,source=wpdata,destination=/var/www/html \
    +     --secret source=mysql_password,target=wp_db_password,mode=0400 \
    +     -e WORDPRESS_DB_USER="wordpress" \
    +     -e WORDPRESS_DB_PASSWORD_FILE="/run/secrets/wp_db_password" \
    +     -e WORDPRESS_DB_HOST="mysql:3306" \
    +     -e WORDPRESS_DB_NAME="wordpress" \
    +     wordpress:latest
    +
  6. Verify the service is running using docker service ls and docker service ps commands.

    $ docker service ls
    +
    +ID            NAME       MODE        REPLICAS  IMAGE
    +wvnh0siktqr3  mysql      replicated  1/1       mysql:latest
    +nzt5xzae4n62  wordpress  replicated  1/1       wordpress:latest
    +
    $ docker service ps wordpress
    +
    +ID            NAME         IMAGE             NODE  DESIRED STATE  CURRENT STATE           ERROR  PORTS
    +aukx6hgs9gwc  wordpress.1  wordpress:latest  moby  Running        Running 52 seconds ago   
    +

    At this point, you could actually revoke the WordPress service’s access to the mysql_password secret, because WordPress has copied the secret to its configuration file wp-config.php. Don’t do that for now, because we use it later to facilitate rotating the MySQL password.

  7. Access http://localhost:30000/ from any swarm node and set up WordPress using the web-based wizard. All of these settings are stored in the MySQL wordpress database. WordPress automatically generates a password for your WordPress user, which is completely different from the password WordPress uses to access MySQL. Store this password securely, such as in a password manager. You need it to log into WordPress after rotating the secret.

    Go ahead and write a blog post or two and install a WordPress plugin or theme to verify that WordPress is fully operational and its state is saved across service restarts.

  8. Do not clean up any services or secrets if you intend to proceed to the next example, which demonstrates how to rotate the MySQL root password.

Example: Rotate a secret

This example builds upon the previous one. In this scenario, you create a new secret with a new MySQL password, update the mysql and wordpress services to use it, then remove the old secret.

Note: Changing the password on a MySQL database involves running extra queries or commands, as opposed to just changing a single environment variable or a file, since the image only sets the MySQL password if the database doesn’t already exist, and MySQL stores the password within a MySQL database by default. Rotating passwords or other secrets may involve additional steps outside of Docker.

  1. Create the new password and store it as a secret named mysql_password_v2.

    $ openssl rand -base64 20 | docker secret create mysql_password_v2 -
    +
  2. Update the MySQL service to give it access to both the old and new secrets. Remember that you cannot update or rename a secret, but you can revoke a secret and grant access to it using a new target filename.

    $ docker service update \
    +     --secret-rm mysql_password mysql
    +
    +$ docker service update \
    +     --secret-add source=mysql_password,target=old_mysql_password \
    +     --secret-add source=mysql_password_v2,target=mysql_password \
    +     mysql
    +

    Updating a service causes it to restart, and when the MySQL service restarts the second time, it has access to the old secret under /run/secrets/old_mysql_password and the new secret under /run/secrets/mysql_password.

    Even though the MySQL service has access to both the old and new secrets now, the MySQL password for the WordPress user has not yet been changed.

    Note: This example does not rotate the MySQL root password.

  3. Now, change the MySQL password for the wordpress user using the mysqladmin CLI. This command reads the old and new password from the files in /run/secrets but does not expose them on the command line or save them in the shell history.

    Do this quickly and move on to the next step, because WordPress loses the ability to connect to MySQL.

    First, find the ID of the mysql container task.

    $ docker ps --filter name=mysql -q
    +
    +c7705cf6176f
    +

    Substitute the ID in the command below, or use the second variant which uses shell expansion to do it all in a single step.

    $ docker container exec <CONTAINER_ID> \
    +    bash -c 'mysqladmin --user=wordpress --password="$(< /run/secrets/old_mysql_password)" password "$(< /run/secrets/mysql_password)"'
    +

    or:

    $ docker container exec $(docker ps --filter name=mysql -q) \
    +    bash -c 'mysqladmin --user=wordpress --password="$(< /run/secrets/old_mysql_password)" password "$(< /run/secrets/mysql_password)"'
    +
  4. Update the wordpress service to use the new password, keeping the target path at /run/secrets/wp_db_password and keeping the file permissions at 0400. This triggers a rolling restart of the WordPress service and the new secret is used.

    $ docker service update \
    +     --secret-rm mysql_password \
    +     --secret-add source=mysql_password_v2,target=wp_db_password,mode=0400 \
    +     wordpress    
    +
  5. Verify that WordPress works by browsing to http://localhost:30000/ on any swarm node again. Use the WordPress username and password from when you ran through the WordPress wizard in the previous task.

    Verify that the blog post you wrote still exists, and if you changed any configuration values, verify that they are still changed.

  6. Revoke access to the old secret from the MySQL service and remove the old secret from Docker.

    $ docker service update \
    +     --secret-rm mysql_password \
    +     mysql
    +
    +$ docker secret rm mysql_password
    +
  7. If you want to run all of these examples again, or just want to clean up after running through them, use these commands to remove the WordPress and MySQL services, the mydata and wpdata volumes, and the Docker secrets.

    $ docker service rm wordpress mysql
    +
    +$ docker volume rm mydata wpdata
    +
    +$ docker secret rm mysql_password_v2 mysql_root_password
    +

Build support for Docker Secrets into your images

If you develop a container that can be deployed as a service and requires sensitive data, such as a credential, as an environment variable, consider adapting your image to take advantage of Docker secrets. One way to do this is to ensure that each parameter you pass to the image when creating the container can also be read from a file.

Many of the Docker Official Images in the Docker library, such as the wordpress image used in the above examples, have been updated in this way.

When you start a WordPress container, you provide it with the parameters it needs by setting them as environment variables. The WordPress image has been updated so that the environment variables which contain important data for WordPress, such as WORDPRESS_DB_PASSWORD, also have variants which can read their values from a file (WORDPRESS_DB_PASSWORD_FILE). This strategy ensures that backward compatibility is preserved, while allowing your container to read the information from a Docker-managed secret instead of being passed directly.
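One way to implement this pattern in your own image is a small entrypoint wrapper. The following is a minimal sketch, assuming a hypothetical EXAMPLE_PASSWORD setting; it is not taken from any particular official image:

#!/bin/sh
# Hypothetical entrypoint fragment: if EXAMPLE_PASSWORD_FILE is set,
# read the value from that file (typically a secret under /run/secrets/)
# and export it as EXAMPLE_PASSWORD before starting the main process.
set -e

if [ -n "$EXAMPLE_PASSWORD_FILE" ]; then
    EXAMPLE_PASSWORD="$(cat "$EXAMPLE_PASSWORD_FILE")"
    export EXAMPLE_PASSWORD
fi

exec "$@"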

Note

Docker secrets do not set environment variables directly. This was a conscious decision, because environment variables can unintentionally be leaked between containers (for instance, if you use --link).

Use Secrets in Compose

version: "3.9"
+
+services:
+   db:
+     image: mysql:latest
+     volumes:
+       - db_data:/var/lib/mysql
+     environment:
+       MYSQL_ROOT_PASSWORD_FILE: /run/secrets/db_root_password
+       MYSQL_DATABASE: wordpress
+       MYSQL_USER: wordpress
+       MYSQL_PASSWORD_FILE: /run/secrets/db_password
+     secrets:
+       - db_root_password
+       - db_password
+
+   wordpress:
+     depends_on:
+       - db
+     image: wordpress:latest
+     ports:
+       - "8000:80"
+     environment:
+       WORDPRESS_DB_HOST: db:3306
+       WORDPRESS_DB_USER: wordpress
+       WORDPRESS_DB_PASSWORD_FILE: /run/secrets/db_password
+     secrets:
+       - db_password
+
+
+secrets:
+   db_password:
+     file: db_password.txt
+   db_root_password:
+     file: db_root_password.txt
+
+volumes:
+    db_data:
+

This example creates a simple WordPress site using two secrets in a compose file.

The top-level secrets: keyword defines two secrets, db_password and db_root_password.

When deploying, Docker creates these two secrets and populates them with the content of the files specified in the compose file.

The db service uses both secrets; the wordpress service uses only db_password.

When you deploy, Docker mounts a file under /run/secrets/<secret_name> in each service’s containers. These files are never persisted on disk; they are managed in memory.

Each service uses environment variables to specify where the service should look for that secret data.
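The compose file above uses the short syntax for secrets. As a sketch, the long syntax lets you set a custom target name and file mode for a secret; the target name and mode below are illustrative:

services:
  db:
    image: mysql:latest
    secrets:
      - source: db_password
        target: mysql_db_password
        mode: 0400

secrets:
  db_password:
    file: db_password.txt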

More information on short and long syntax for secrets can be found at Compose file version 3 reference.

diff --git a/devdocs/docker/engine%2Fswarm%2Fservices%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fservices%2Findex.html new file mode 100644 index 00000000..2efc2b4d --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fservices%2Findex.html @@ -0,0 +1,154 @@ +

Deploy services to a swarm


Swarm services use a declarative model, which means that you define the desired state of the service, and rely upon Docker to maintain this state. The state includes information such as (but not limited to) the image name and tag the service containers should run, how many containers participate in the service, and whether any ports are exposed to clients outside the swarm.

For an overview of swarm mode, see Swarm mode key concepts. For an overview of how services work, see How services work.

Create a service

To create a single-replica service with no extra configuration, you only need to supply the image name. This command starts an Nginx service with a randomly-generated name and no published ports. This is a naive example, since you can’t interact with the Nginx service.

$ docker service create nginx
+

The service is scheduled on an available node. To confirm that the service was created and started successfully, use the docker service ls command:

$ docker service ls
+
+ID                  NAME                MODE                REPLICAS            IMAGE                                                                                             PORTS
+a3iixnklxuem        quizzical_lamarr    replicated          1/1                 docker.io/library/nginx@sha256:41ad9967ea448d7c2b203c699b429abe1ed5af331cd92533900c6d77490e0268
+

Created services do not always run right away. A service can be in a pending state if its image is unavailable, if no node meets the requirements you configure for the service, or other reasons. See Pending services for more information.

To provide a name for your service, use the --name flag:

$ docker service create --name my_web nginx
+

Just like with standalone containers, you can specify a command that the service’s containers should run, by adding it after the image name. This example starts a service called helloworld which uses an alpine image and runs the command ping docker.com:

$ docker service create --name helloworld alpine ping docker.com
+

You can also specify an image tag for the service to use. This example modifies the previous one to use the alpine:3.6 tag:

$ docker service create --name helloworld alpine:3.6 ping docker.com
+

For more details about image tag resolution, see Specify the image version the service should use.

gMSA for Swarm

Swarm now allows using a Docker Config as a gMSA credential spec - a requirement for Active Directory-authenticated applications. This reduces the burden of distributing credential specs to the nodes they’re used on.

The following example assumes a gMSA and its credential spec (called credspec.json) already exists, and that the nodes being deployed to are correctly configured for the gMSA.

To use a Config as a credential spec, first create the Docker Config containing the credential spec:

$ docker config create credspec credspec.json
+

Now, you should have a Docker Config named credspec, and you can create a service using this credential spec. To do so, use the --credential-spec flag with the config name, like this:

$ docker service create --credential-spec="config://credspec" <your image>
+

Your service will use the gMSA credential spec when it starts, but unlike a typical Docker Config (used by passing the --config flag), the credential spec will not be mounted into the container.

Create a service using an image on a private registry

If your image is available on a private registry which requires login, use the --with-registry-auth flag with docker service create, after logging in. If your image is stored on registry.example.com, which is a private registry, use a command like the following:

$ docker login registry.example.com
+
+$ docker service  create \
+  --with-registry-auth \
+  --name my_service \
+  registry.example.com/acme/my_image:latest
+

This passes the login token from your local client to the swarm nodes where the service is deployed, using the encrypted WAL logs. With this information, the nodes are able to log into the registry and pull the image.

Provide credential specs for managed service accounts

In Enterprise Edition 3.0, security is improved through the centralized distribution and management of Group Managed Service Account (gMSA) credentials using Docker Config functionality. Swarm now allows using a Docker Config as a gMSA credential spec, which reduces the burden of distributing credential specs to the nodes on which they are used.

Note: This option is only applicable to services using Windows containers.

Credential spec files are applied at runtime, eliminating the need for host-based credential spec files or registry entries - no gMSA credentials are written to disk on worker nodes. You can make credential specs available to Docker Engine running swarm kit worker nodes before a container starts. When deploying a service using a gMSA-based config, the credential spec is passed directly to the runtime of containers in that service.

The --credential-spec flag must use one of the following formats: file://<filename>, registry://<value-name>, or config://<config-name>.

The following simple example retrieves the gMSA name and JSON contents from your Active Directory (AD) instance:

$ name="mygmsa"
+$ contents="{...}"
+$ echo $contents > contents.json
+

Make sure that the nodes to which you are deploying are correctly configured for the gMSA.

To use a Config as a credential spec, create a Docker Config from a credential spec file named credspec.json. You can specify any name for the config.

$ docker config create --label com.docker.gmsa.name=mygmsa credspec credspec.json
+

Now you can create a service using this credential spec. Specify the --credential-spec flag with the config name:

$ docker service create --credential-spec="config://credspec" <your image>
+

Your service uses the gMSA credential spec when it starts, but unlike a typical Docker Config (used by passing the --config flag), the credential spec is not mounted into the container.

Update a service

You can change almost everything about an existing service using the docker service update command. When you update a service, Docker stops its containers and restarts them with the new configuration.

Since Nginx is a web service, it works much better if you publish port 80 to clients outside the swarm. You can specify this when you create the service, using the -p or --publish flag. When updating an existing service, the flag is --publish-add. There is also a --publish-rm flag to remove a port that was previously published.

Assuming that the my_web service from the previous section still exists, use the following command to update it to publish port 80.

$ docker service update --publish-add 80 my_web
+

To verify that it worked, use docker service ls:

$ docker service ls
+
+ID                  NAME                MODE                REPLICAS            IMAGE                                                                                             PORTS
+4nhxl7oxw5vz        my_web              replicated          1/1                 docker.io/library/nginx@sha256:41ad9967ea448d7c2b203c699b429abe1ed5af331cd92533900c6d77490e0268   *:0->80/tcp
+

For more information on how publishing ports works, see publish ports.

You can update almost every configuration detail about an existing service, including the image name and tag it runs. See Update a service’s image after creation.

Remove a service

To remove a service, use the docker service remove command. You can remove a service by its ID or name, as shown in the output of the docker service ls command. The following command removes the my_web service.

$ docker service remove my_web
+

Service configuration details

The following sections provide details about service configuration. This topic does not cover every flag or scenario. In almost every instance where you can define a configuration at service creation, you can also update an existing service’s configuration in a similar way.

See the command-line references for docker service create and docker service update, or run one of those commands with the --help flag.

Configure the runtime environment

You can configure the following options for the runtime environment in the container: environment variables using the --env flag, the working directory inside the container using the --workdir flag, and the username or UID using the --user flag.

The following service’s containers have an environment variable $MYVAR set to myvalue, run from the /tmp/ directory, and run as the my_user user.

$ docker service create --name helloworld \
+  --env MYVAR=myvalue \
+  --workdir /tmp \
+  --user my_user \
+  alpine ping docker.com
+

Update the command an existing service runs

To update the command an existing service runs, you can use the --args flag. The following example updates an existing service called helloworld so that it runs the command ping docker.com instead of whatever command it was running before:

$ docker service update --args "ping docker.com" helloworld
+

Specify the image version a service should use

When you create a service without specifying any details about the version of the image to use, the service uses the version tagged with the latest tag. You can force the service to use a specific version of the image in a few different ways, depending on your desired outcome.

An image version can be expressed in several different ways: by omitting the tag (which defaults to latest), by specifying a tag explicitly, or by pinning the image to a specific digest.
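For illustration, here is the same service created each of those ways. The explicit tag is hypothetical, and the digest is the nginx digest shown in the docker service ls output earlier on this page; each command is shown independently, since service names must be unique within a swarm.

$ docker service create --name my_web nginx

$ docker service create --name my_web nginx:1.21

$ docker service create --name my_web \
    nginx@sha256:41ad9967ea448d7c2b203c699b429abe1ed5af331cd92533900c6d77490e0268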

When you create a service, the image’s tag is resolved to the specific digest the tag points to at the time of service creation. Worker nodes for that service use that specific digest forever unless the service is explicitly updated. This feature is particularly important if you do use often-changing tags such as latest, because it ensures that all service tasks use the same version of the image.

Note: If content trust is enabled, the client actually resolves the image’s tag to a digest before contacting the swarm manager, to verify that the image is signed. Thus, if you use content trust, the swarm manager receives the request pre-resolved. In this case, if the client cannot resolve the image to a digest, the request fails.

If the manager can’t resolve the tag to a digest, each worker node is responsible for resolving the tag to a digest, and different nodes may use different versions of the image. If this happens, a warning like the following is logged, substituting the placeholders for real information.

unable to pin image <IMAGE-NAME> to digest: <REASON>
+

To see an image’s current digest, issue the command docker inspect <IMAGE>:<TAG> and look for the RepoDigests line. The following is the current digest for ubuntu:latest at the time this content was written. The output is truncated for clarity.

$ docker inspect ubuntu:latest
+
"RepoDigests": [
+    "ubuntu@sha256:35bc48a1ca97c3971611dc4662d08d131869daa692acb281c7e9e052924e38b1"
+],
+

After you create a service, its image is never updated unless you explicitly run docker service update with the --image flag as described below. Other update operations such as scaling the service, adding or removing networks or volumes, renaming the service, or any other type of update operation do not update the service’s image.

Update a service’s image after creation

Each tag represents a digest, similar to a Git hash. Some tags, such as latest, are updated often to point to a new digest. Others, such as ubuntu:16.04, represent a released software version and are not expected to update to point to a new digest often if at all. When you create a service, it is constrained to create tasks using a specific digest of an image until you update the service using service update with the --image flag.

When you run service update with the --image flag, the swarm manager queries Docker Hub or your private Docker registry for the digest the tag currently points to and updates the service tasks to use that digest.
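For example, a sketch that moves an existing service to a different tag (the tag shown is illustrative):

$ docker service update --image nginx:alpine my_web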

Note: If you use content trust, the Docker client resolves the image tag to a digest before the request reaches the swarm manager, so the manager receives the image name and digest rather than a tag.

Usually, the manager can resolve the tag to a new digest and the service updates, redeploying each task to use the new image. If the manager can’t resolve the tag or some other problem occurs, the next two sections outline what to expect.

If the manager resolves the tag

If the swarm manager can resolve the image tag to a digest, it instructs the worker nodes to redeploy the tasks and use the image at that digest.

If the manager cannot resolve the tag

If the swarm manager cannot resolve the image to a digest, all is not lost: each worker node is responsible for resolving the tag to a digest itself, so different nodes may end up running different versions of the image.

Publish ports

When you create a swarm service, you can publish that service’s ports to hosts outside the swarm in two ways: by using the routing mesh, which makes the published port reachable on every swarm node, or by publishing the port directly on the node where the service task runs (mode=host).

Keep reading for more information and use cases for each of these methods.

Publish a service’s ports using the routing mesh

To publish a service’s ports externally to the swarm, use the --publish <PUBLISHED-PORT>:<SERVICE-PORT> flag. The swarm makes the service accessible at the published port on every swarm node. If an external host connects to that port on any swarm node, the routing mesh routes it to a task. The external host does not need to know the IP addresses or internally-used ports of the service tasks to interact with the service. When a user or process connects to a service, any worker node running a service task may respond. For more details about swarm service networking, see Manage swarm service networks.

Example: Run a three-task Nginx service on 10-node swarm

Imagine that you have a 10-node swarm, and you deploy an Nginx service running three tasks:

$ docker service create --name my_web \
+                        --replicas 3 \
+                        --publish published=8080,target=80 \
+                        nginx
+

Three tasks run on up to three nodes. You don’t need to know which nodes are running the tasks; connecting to port 8080 on any of the 10 nodes connects you to one of the three nginx tasks. You can test this using curl. The following example assumes that localhost is one of the swarm nodes. If this is not the case, or localhost does not resolve to an IP address on your host, substitute the host’s IP address or resolvable host name.

The HTML output is truncated:

$ curl localhost:8080
+
+<!DOCTYPE html>
+<html>
+<head>
+<title>Welcome to nginx!</title>
+...truncated...
+</html>
+

Subsequent connections may be routed to the same swarm node or a different one.

Publish a service’s ports directly on the swarm node

Using the routing mesh may not be the right choice for your application if you need to make routing decisions based on application state or you need total control of the process for routing requests to your service’s tasks. To publish a service’s port directly on the node where it is running, use the mode=host option to the --publish flag.

Note: If you publish a service’s ports directly on the swarm node using mode=host and also set published=<PORT> this creates an implicit limitation that you can only run one task for that service on a given swarm node. You can work around this by specifying published without a port definition, which causes Docker to assign a random port for each task.

In addition, if you use mode=host and you do not use the --mode=global flag on docker service create, it is difficult to know which nodes are running the service to route work to them.

Example: Run an nginx web server service on every swarm node

nginx is an open source reverse proxy, load balancer, HTTP cache, and a web server. If you run nginx as a service using the routing mesh, connecting to the nginx port on any swarm node shows you the web page for (effectively) a random swarm node running the service.

The following example runs nginx as a service on each node in your swarm and exposes the nginx port locally on each swarm node.

$ docker service create \
+  --mode global \
+  --publish mode=host,target=80,published=8080 \
+  --name=nginx \
+  nginx:latest
+

You can reach the nginx server on port 8080 of every swarm node. If you add a node to the swarm, an nginx task is started on it. You cannot start another service or container on any swarm node which binds to port 8080.

Note: This is a naive example. Creating an application-layer routing framework for a multi-tiered service is complex and out of scope for this topic.

Connect the service to an overlay network

You can use overlay networks to connect one or more services within the swarm.

First, create an overlay network on a manager node using the docker network create command with the --driver overlay flag.

$ docker network create --driver overlay my-network
+

After you create an overlay network in swarm mode, all manager nodes have access to the network.

You can create a new service and pass the --network flag to attach the service to the overlay network:

$ docker service create \
+  --replicas 3 \
+  --network my-network \
+  --name my-web \
+  nginx
+

The swarm extends my-network to each node running the service.

You can also connect an existing service to an overlay network using the --network-add flag.

$ docker service update --network-add my-network my-web
+

To disconnect a running service from a network, use the --network-rm flag.

$ docker service update --network-rm my-network my-web
+

For more information on overlay networking and service discovery, refer to Attach services to an overlay network and Docker swarm mode overlay network security model.

Grant a service access to secrets

To create a service with access to Docker-managed secrets, use the --secret flag. For more information, see Manage sensitive strings (secrets) for Docker services

Customize a service’s isolation mode

Docker allows you to specify a swarm service’s isolation mode. This setting applies to Windows hosts only and is ignored for Linux hosts. The isolation mode can be one of the following: default, process, or hyperv.

You can specify the isolation mode when creating or updating a service using the --isolation flag.
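For example, a sketch that requests Hyper-V isolation for a hypothetical Windows-based service:

$ docker service create \
  --name my-windows-service \
  --isolation hyperv \
  <WINDOWS-IMAGE>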

Control service placement

Swarm services provide a few different ways for you to control scale and placement of services on different nodes.

Replicated or global services

Swarm mode has two types of services: replicated and global. For replicated services, you specify the number of replica tasks for the swarm manager to schedule onto available nodes. For global services, the scheduler places one task on each available node that meets the service’s placement constraints and resource requirements.

You control the type of service using the --mode flag. If you don’t specify a mode, the service defaults to replicated. For replicated services, you specify the number of replica tasks you want to start using the --replicas flag. For example, to start a replicated nginx service with 3 replica tasks:

$ docker service create \
+  --name my_web \
+  --replicas 3 \
+  nginx
+

To start a global service on each available node, pass --mode global to docker service create. Every time a new node becomes available, the scheduler places a task for the global service on the new node. For example to start a service that runs alpine on every node in the swarm:

$ docker service create \
+  --name myservice \
+  --mode global \
+  alpine top
+

Service constraints let you set criteria for a node to meet before the scheduler deploys a service to the node. You can apply constraints to the service based upon node attributes and metadata or engine metadata. For more information on constraints, refer to the docker service create CLI reference.

Reserve memory or CPUs for a service

To reserve a given amount of memory or number of CPUs for a service, use the --reserve-memory or --reserve-cpu flags. If no available nodes can satisfy the requirement (for instance, if you request 4 CPUs and no node in the swarm has 4 CPUs), the service remains in a pending state until an appropriate node is available to run its tasks.

Out Of Memory Exceptions (OOME)

If your service attempts to use more memory than the swarm node has available, you may experience an Out Of Memory Exception (OOME) and a container, or the Docker daemon, might be killed by the kernel OOM killer. To prevent this from happening, ensure that your application runs on hosts with adequate memory and see Understand the risks of running out of memory.

Swarm services allow you to use resource constraints, placement preferences, and labels to ensure that your service is deployed to the appropriate swarm nodes.

Placement constraints

Use placement constraints to control the nodes a service can be assigned to. In the following example, the service only runs on nodes with the label region set to east. If no appropriately-labelled nodes are available, tasks wait in Pending until such a node becomes available. The --constraint flag uses an equality operator (== or !=). For replicated services, it is possible that all replicas run on the same node, that each node runs only one replica, or that some nodes don’t run any replicas. For global services, the service runs on every node that meets the placement constraint and any resource requirements.

$ docker service create \
+  --name my-nginx \
+  --replicas 5 \
+  --constraint node.labels.region==east \
+  nginx
+

You can also use the constraint service-level key in a docker-compose.yml file.
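As a sketch, the equivalent constraint in a compose file sits under the service’s deploy key and takes effect when the file is deployed to a swarm with docker stack deploy:

version: "3.9"

services:
  web:
    image: nginx
    deploy:
      placement:
        constraints:
          - node.labels.region == east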

If you specify multiple placement constraints, the service only deploys onto nodes where they are all met. The following example limits the service to run on all nodes where region is set to east and type is not set to devel:

$ docker service create \
+  --name my-nginx \
+  --mode global \
+  --constraint node.labels.region==east \
+  --constraint node.labels.type!=devel \
+  nginx
+

You can also use placement constraints in conjunction with placement preferences and CPU/memory constraints. Be careful not to use settings that are not possible to fulfill.

For more information on constraints, refer to the docker service create CLI reference.

Placement preferences

While placement constraints limit the nodes a service can run on, placement preferences try to place tasks on appropriate nodes in an algorithmic way (currently, only spread evenly). For instance, if you assign each node a rack label, you can set a placement preference to spread the service evenly across nodes with the rack label, by value. This way, if you lose a rack, the service is still running on nodes on other racks.

Placement preferences are not strictly enforced. If no node has the label you specify in your preference, the service is deployed as though the preference were not set.

Placement preferences are ignored for global services.

The following example sets a preference to spread the deployment across nodes based on the value of the datacenter label. If some nodes have datacenter=us-east and others have datacenter=us-west, the service is deployed as evenly as possible across the two sets of nodes.

$ docker service create \
+  --replicas 9 \
+  --name redis_2 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  redis:3.0.6
+

Missing or null labels

Nodes which are missing the label used to spread still receive task assignments. As a group, these nodes receive tasks in equal proportion to any of the other groups identified by a specific label value. In a sense, a missing label is the same as having the label with a null value attached to it. If the service should only run on nodes with the label being used for the spread preference, the preference should be combined with a constraint.

You can specify multiple placement preferences, and they are processed in the order they are encountered. The following example sets up a service with multiple placement preferences. Tasks are spread first over the various datacenters, and then over racks (as indicated by the respective labels):

$ docker service create \
+  --replicas 9 \
+  --name redis_2 \
+  --placement-pref 'spread=node.labels.datacenter' \
+  --placement-pref 'spread=node.labels.rack' \
+  redis:3.0.6
+

You can also use placement preferences in conjunction with placement constraints or CPU/memory constraints. Be careful not to use settings that are not possible to fulfill.

This diagram illustrates how placement preferences work:

placement preferences example

When updating a service with docker service update, --placement-pref-add appends a new placement preference after all existing placement preferences. --placement-pref-rm removes an existing placement preference that matches the argument.
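For example, building on the redis_2 service above, the following sketch adds a rack-based preference and then removes the datacenter-based one:

$ docker service update \
  --placement-pref-add 'spread=node.labels.rack' \
  redis_2

$ docker service update \
  --placement-pref-rm 'spread=node.labels.datacenter' \
  redis_2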

Configure a service’s update behavior

When you create a service, you can specify a rolling update behavior for how the swarm should apply changes to the service when you run docker service update. You can also specify these flags as part of the update, as arguments to docker service update.

The --update-delay flag configures the time delay between updates to a service task or sets of tasks. You can describe the time T as a combination of the number of seconds Ts, minutes Tm, or hours Th. So 10m30s indicates a 10 minute 30 second delay.

By default the scheduler updates 1 task at a time. You can pass the --update-parallelism flag to configure the maximum number of service tasks that the scheduler updates simultaneously.

When an update to an individual task returns a state of RUNNING, the scheduler continues the update by moving on to the next task, until all tasks are updated. If, at any time during an update, a task returns FAILED, the scheduler pauses the update. You can control this behavior using the --update-failure-action flag for docker service create or docker service update.

In the example service below, the scheduler applies updates to a maximum of 2 replicas at a time. When an updated task returns either RUNNING or FAILED, the scheduler waits 10 seconds before stopping the next task to update:

$ docker service create \
+  --replicas 10 \
+  --name my_web \
+  --update-delay 10s \
+  --update-parallelism 2 \
+  --update-failure-action continue \
+  alpine
+

The --update-max-failure-ratio flag controls what fraction of tasks can fail during an update before the update as a whole is considered to have failed. For example, with --update-max-failure-ratio 0.1 --update-failure-action pause, after 10% of the tasks being updated fail, the update is paused.

An individual task update is considered to have failed if the task doesn’t start up, or if it stops running within the monitoring period specified with the --update-monitor flag. The default value for --update-monitor is 30 seconds, which means that a task failing in the first 30 seconds after it starts counts toward the service update failure threshold, and a failure after that does not.
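For instance, a sketch that adjusts the monitoring window and failure tolerance on an existing service (the values are illustrative):

$ docker service update \
  --update-monitor 60s \
  --update-max-failure-ratio 0.1 \
  --update-failure-action pause \
  my_web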

Roll back to the previous version of a service

In case the updated version of a service doesn’t function as expected, it’s possible to manually roll back to the previous version of the service using docker service update’s --rollback flag. This reverts the service to the configuration that was in place before the most recent docker service update command.

Other options can be combined with --rollback; for example, --update-delay 0s to execute the rollback without a delay between tasks:

$ docker service update \
+  --rollback \
+  --update-delay 0s \
+  my_web
+

You can configure a service to roll back automatically if a service update fails to deploy. See Automatically roll back if an update fails.

Manual rollback is handled at the server side, which allows manually-initiated rollbacks to respect the new rollback parameters. Note that --rollback cannot be used in conjunction with other flags to docker service update.

Automatically roll back if an update fails

You can configure a service in such a way that if an update to the service causes redeployment to fail, the service can automatically roll back to the previous configuration. This helps protect service availability. You can set one or more of the following flags at service creation or update. If you do not set a value, the default is used.

Flag Default Description
--rollback-delay 0s Amount of time to wait after rolling back a task before rolling back the next one. A value of 0 means to roll back the second task immediately after the first rolled-back task deploys.
--rollback-failure-action pause When a task fails to roll back, whether to pause or continue trying to roll back other tasks.
--rollback-max-failure-ratio 0 The failure rate to tolerate during a rollback, specified as a floating-point number between 0 and 1. For instance, given 5 tasks, a failure ratio of .2 would tolerate one task failing to roll back. A value of 0 means no failures are tolerated, while a value of 1 means any number of failures is tolerated.
--rollback-monitor 5s Duration after each task rollback to monitor for failure. If a task stops before this time period has elapsed, the rollback is considered to have failed.
--rollback-parallelism 1 The maximum number of tasks to roll back in parallel. By default, one task is rolled back at a time. A value of 0 causes all tasks to be rolled back in parallel.

The following example configures a redis service to roll back automatically if a docker service update fails to deploy. Two tasks can be rolled back in parallel. Tasks are monitored for 20 seconds after rollback to be sure they do not exit, and a maximum failure ratio of 20% is tolerated. Default values are used for --rollback-delay and --rollback-failure-action.

$ docker service create --name=my_redis \
+                        --replicas=5 \
+                        --rollback-parallelism=2 \
+                        --rollback-monitor=20s \
+                        --rollback-max-failure-ratio=.2 \
+                        redis:latest
+

Give a service access to volumes or bind mounts

For best performance and portability, you should avoid writing important data directly into a container’s writable layer, instead using data volumes or bind mounts. This principle also applies to services.

You can create two types of mounts for services in a swarm, volume mounts or bind mounts. Regardless of which type of mount you use, configure it using the --mount flag when you create a service, or the --mount-add or --mount-rm flag when updating an existing service. The default is a data volume if you don’t specify a type.
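For example, a sketch that adds a volume mount to an existing service; the volume name and container path are hypothetical:

$ docker service update \
  --mount-add type=volume,source=other_data,target=/mnt/other \
  myservice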

Data volumes

Data volumes are storage that exist independently of a container. The lifecycle of data volumes under swarm services is similar to that under containers. Volumes outlive tasks and services, so their removal must be managed separately. Volumes can be created before deploying a service, or if they don’t exist on a particular host when a task is scheduled there, they are created automatically according to the volume specification on the service.

To use existing data volumes with a service use the --mount flag:

$ docker service create \
+  --mount src=<VOLUME-NAME>,dst=<CONTAINER-PATH> \
+  --name myservice \
+  <IMAGE>
+

If a volume with the same <VOLUME-NAME> does not exist when a task is scheduled to a particular host, then one is created. The default volume driver is local. To use a different volume driver with this create-on-demand pattern, specify the driver and its options with the --mount flag:

$ docker service create \
+  --mount type=volume,src=<VOLUME-NAME>,dst=<CONTAINER-PATH>,volume-driver=<DRIVER>,volume-opt=<KEY0>=<VALUE0>,volume-opt=<KEY1>=<VALUE1> \
+  --name myservice \
+  <IMAGE>
+

For more information on how to create data volumes and the use of volume drivers, see Use volumes.

Bind mounts

Bind mounts are file system paths from the host where the scheduler deploys the container for the task. Docker mounts the path into the container. The file system path must exist before the swarm initializes the container for the task.

The following examples show bind mount syntax:
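As a sketch, the read-write and read-only forms follow the same --mount placeholder pattern used elsewhere on this page:

$ docker service create \
  --mount type=bind,src=<HOST-PATH>,dst=<CONTAINER-PATH> \
  --name myservice \
  <IMAGE>

$ docker service create \
  --mount type=bind,src=<HOST-PATH>,dst=<CONTAINER-PATH>,readonly \
  --name myservice \
  <IMAGE>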

Important: Bind mounts can be useful but they can also cause problems. In most cases, it is recommended that you architect your application such that mounting paths from the host is unnecessary. The main risks include the following: the host path must exist on every node where a task for the service might be scheduled, and bind mounts tie your service to the host’s filesystem layout, which reduces portability.

Create services using templates

You can use templates for some flags of service create, using the syntax provided by Go’s text/template package.

The following flags are supported: --env, --hostname, and --mount.

Valid placeholders for the Go template are:

Placeholder Description
.Service.ID Service ID
.Service.Name Service name
.Service.Labels Service labels
.Node.ID Node ID
.Node.Hostname Node hostname
.Task.Name Task name
.Task.Slot Task slot

Template example

This example sets the template of the created containers based on the service’s name and the ID of the node where the container is running:

$ docker service create --name hosttempl \
+                        --hostname="{{.Node.ID}}-{{.Service.Name}}"\
+                         busybox top
+

To see the result of using the template, use the docker service ps and docker inspect commands.

$ docker service ps va8ew30grofhjoychbr6iot8c
+
+ID            NAME         IMAGE                                                                                   NODE          DESIRED STATE  CURRENT STATE               ERROR  PORTS
+wo41w8hg8qan  hosttempl.1  busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912  2e7a8a9c4da2  Running        Running about a minute ago
+
$ docker inspect --format="{{.Config.Hostname}}" hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj
+

diff --git a/devdocs/docker/engine%2Fswarm%2Fstack-deploy%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fstack-deploy%2Findex.html new file mode 100644 index 00000000..ee0e74c9 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fstack-deploy%2Findex.html @@ -0,0 +1,127 @@ +

Deploy a stack to a swarm


When running Docker Engine in swarm mode, you can use docker stack deploy to deploy a complete application stack to the swarm. The deploy command accepts a stack description in the form of a Compose file.

The docker stack deploy command supports any Compose file of version “3.0” or above. If you have an older version, see the upgrade guide.

To run through this tutorial, you need:

  1. A Docker Engine running in swarm mode. If you’re not familiar with swarm mode, you might want to read Swarm mode key concepts and How services work.

    Note

    If you’re trying things out on a local development environment, you can put your engine into swarm mode with docker swarm init.

    If you’ve already got a multi-node swarm running, keep in mind that all docker stack and docker service commands must be run from a manager node.

  2. A current version of Docker Compose.

Set up a Docker registry

Because a swarm consists of multiple Docker Engines, a registry is required to distribute images to all of them. You can use the Docker Hub or maintain your own. Here’s how to create a throwaway registry, which you can discard afterward.

  1. Start the registry as a service on your swarm:

    $ docker service create --name registry --publish published=5000,target=5000 registry:2
    +
  2. Check its status with docker service ls:

    $ docker service ls
    +
    +ID            NAME      REPLICAS  IMAGE                                                                               COMMAND
    +l7791tpuwkco  registry  1/1       registry:2@sha256:1152291c7f93a4ea2ddc95e46d142c31e743b6dd70e194af9e6ebe530f782c17
    +

    Once it reads 1/1 under REPLICAS, it’s running. If it reads 0/1, it’s probably still pulling the image.

  3. Check that it’s working with curl:

    $ curl http://localhost:5000/v2/
    +
    +{}
    +

Create the example application

The app used in this guide is based on the hit counter app in the Get started with Docker Compose guide. It consists of a Python app which maintains a counter in a Redis instance and increments the counter whenever you visit it.

  1. Create a directory for the project:

    $ mkdir stackdemo
    +$ cd stackdemo
    +
  2. Create a file called app.py in the project directory and paste this in:

    from flask import Flask
    +from redis import Redis
    +
    +app = Flask(__name__)
    +redis = Redis(host='redis', port=6379)
    +
    +@app.route('/')
    +def hello():
    +    count = redis.incr('hits')
    +    return 'Hello World! I have been seen {} times.\n'.format(count)
    +
    +if __name__ == "__main__":
    +    app.run(host="0.0.0.0", port=8000, debug=True)
    +
  3. Create a file called requirements.txt and paste these two lines in:

    flask
    +redis
    +
  4. Create a file called Dockerfile and paste this in:

    # syntax=docker/dockerfile:1
    +FROM python:3.4-alpine
    +ADD . /code
    +WORKDIR /code
    +RUN pip install -r requirements.txt
    +CMD ["python", "app.py"]
    +
  5. Create a file called docker-compose.yml and paste this in:

    version: "3.9"
    +
    +services:
    +  web:
    +    image: 127.0.0.1:5000/stackdemo
    +    build: .
    +    ports:
    +      - "8000:8000"
    +  redis:
    +    image: redis:alpine
    +

    The image for the web app is built using the Dockerfile defined above. It’s also tagged with 127.0.0.1:5000 - the address of the registry created earlier. This is important when distributing the app to the swarm.
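    Under the hood, this is roughly equivalent to building and pushing the image by hand (a sketch, assuming the registry from the previous section is still running):

    $ docker build -t 127.0.0.1:5000/stackdemo .
    $ docker push 127.0.0.1:5000/stackdemo

    In this guide, docker-compose up (which builds the image) and docker-compose push take care of these steps for you.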

Test the app with Compose

  1. Start the app with docker-compose up. This builds the web app image, pulls the Redis image if you don’t already have it, and creates two containers.

    You see a warning about the Engine being in swarm mode. This is because Compose doesn’t take advantage of swarm mode, and deploys everything to a single node. You can safely ignore this.

    $ docker-compose up -d
    +
    +WARNING: The Docker Engine you're using is running in swarm mode.
    +
    +Compose does not use swarm mode to deploy services to multiple nodes in
    +a swarm. All containers are scheduled on the current node.
    +
    +To deploy your application across the swarm, use `docker stack deploy`.
    +
    +Creating network "stackdemo_default" with the default driver
    +Building web
    +...(build output)...
    +Creating stackdemo_redis_1
    +Creating stackdemo_web_1
    +
  2. Check that the app is running with docker-compose ps:

    $ docker-compose ps
    +
    +      Name                     Command               State           Ports
    +-----------------------------------------------------------------------------------
    +stackdemo_redis_1   docker-entrypoint.sh redis ...   Up      6379/tcp
    +stackdemo_web_1     python app.py                    Up      0.0.0.0:8000->8000/tcp
    +

    You can test the app with curl:

    $ curl http://localhost:8000
    +Hello World! I have been seen 1 times.
    +
    +$ curl http://localhost:8000
    +Hello World! I have been seen 2 times.
    +
    +$ curl http://localhost:8000
    +Hello World! I have been seen 3 times.
    +
  3. Bring the app down:

    $ docker-compose down --volumes
    +
    +Stopping stackdemo_web_1 ... done
    +Stopping stackdemo_redis_1 ... done
    +Removing stackdemo_web_1 ... done
    +Removing stackdemo_redis_1 ... done
    +Removing network stackdemo_default
    +

Push the generated image to the registry

To distribute the web app’s image across the swarm, it needs to be pushed to the registry you set up earlier. With Compose, this is very simple:

$ docker-compose push
+
+Pushing web (127.0.0.1:5000/stackdemo:latest)...
+The push refers to a repository [127.0.0.1:5000/stackdemo]
+5b5a49501a76: Pushed
+be44185ce609: Pushed
+bd7330a79bcf: Pushed
+c9fc143a069a: Pushed
+011b303988d2: Pushed
+latest: digest: sha256:a81840ebf5ac24b42c1c676cbda3b2cb144580ee347c07e1bc80e35e5ca76507 size: 1372
+

The stack is now ready to be deployed.

Deploy the stack to the swarm

  1. Create the stack with docker stack deploy:

    $ docker stack deploy --compose-file docker-compose.yml stackdemo
    +
    +Ignoring unsupported options: build
    +
    +Creating network stackdemo_default
    +Creating service stackdemo_web
    +Creating service stackdemo_redis
    +

    The last argument is a name for the stack. Each network, volume and service name is prefixed with the stack name.

  2. Check that it’s running with docker stack services stackdemo:

    $ docker stack services stackdemo
    +
    +ID            NAME             MODE        REPLICAS  IMAGE
    +orvjk2263y1p  stackdemo_redis  replicated  1/1       redis:3.2-alpine@sha256:f1ed3708f538b537eb9c2a7dd50dc90a706f7debd7e1196c9264edeea521a86d
    +s1nf0xy8t1un  stackdemo_web    replicated  1/1       127.0.0.1:5000/stackdemo@sha256:adb070e0805d04ba2f92c724298370b7a4eb19860222120d43e0f6351ddbc26f
    +

    Once it’s running, you should see 1/1 under REPLICAS for both services. This might take some time if you have a multi-node swarm, as images need to be pulled.

    As before, you can test the app with curl:

    $ curl http://localhost:8000
    +Hello World! I have been seen 1 times.
    +
    +$ curl http://localhost:8000
    +Hello World! I have been seen 2 times.
    +
    +$ curl http://localhost:8000
    +Hello World! I have been seen 3 times.
    +

    Thanks to Docker’s built-in routing mesh, you can access any node in the swarm on port 8000 and get routed to the app:

    $ curl http://address-of-other-node:8000
    +Hello World! I have been seen 4 times.
    +
  3. Bring the stack down with docker stack rm:

    $ docker stack rm stackdemo
    +
    +Removing service stackdemo_web
    +Removing service stackdemo_redis
    +Removing network stackdemo_default
    +
  4. Bring the registry down with docker service rm:

    $ docker service rm registry
    +
  5. If you’re just testing things out on a local machine and want to bring your Docker Engine out of swarm mode, use docker swarm leave:

    $ docker swarm leave --force
    +
    +Node left the swarm.
    +
+

guide, swarm mode, composefile, stack, compose, deploy

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/stack-deploy/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-mode%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-mode%2Findex.html new file mode 100644 index 00000000..c84079cb --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-mode%2Findex.html @@ -0,0 +1,55 @@ +

Run Docker Engine in swarm mode

+ +

When you first install and start working with Docker Engine, swarm mode is disabled by default. When you enable swarm mode, you work with the concept of services managed through the docker service command.

There are two ways to run the Engine in swarm mode: create a new swarm, as described on this page, or join an existing swarm.

When you run the Engine in swarm mode on your local machine, you can create and test services based upon images you’ve created or other available images. In your production environment, swarm mode provides a fault-tolerant platform with cluster management features to keep your services running and available.

These instructions assume you have installed Docker Engine 1.12 or later on a machine to serve as a manager node in your swarm.

If you haven’t already, read through the swarm mode key concepts and try the swarm mode tutorial.

Create a swarm

When you run the command to create a swarm, the Docker Engine starts running in swarm mode.

Run docker swarm init to create a single-node swarm on the current node. The Engine switches the node into swarm mode, makes it the manager, and generates the certificates and join tokens that other nodes need to join the swarm.

The output for docker swarm init provides the connection command to use when you join new worker nodes to the swarm:

$ docker swarm init
+Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
+    192.168.99.100:2377
+
+To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
+

Configuring default address pools

By default Docker Swarm uses a default address pool 10.0.0.0/8 for global scope (overlay) networks. Every network that does not have a subnet specified will have a subnet sequentially allocated from this pool. In some circumstances it may be desirable to use a different default IP address pool for networks.

For example, if the default 10.0.0.0/8 range conflicts with already allocated address space in your network, then it is desirable to ensure that networks use a different range without requiring Swarm users to specify each subnet with the --subnet command.

To configure custom default address pools, you must define pools at Swarm initialization using the --default-addr-pool command line option. This command line option uses CIDR notation for defining the subnet mask. To create the custom address pool for Swarm, you must define at least one default address pool, and optionally a default address pool subnet mask length. For example, to carve out subnets like 10.0.0.0/27, set the mask length value to 27.

Docker allocates subnet addresses from the address ranges specified by the --default-addr-pool option. For example, a command line option --default-addr-pool 10.10.0.0/16 indicates that Docker will allocate subnets from that /16 address range. If --default-addr-pool-mask-length were unspecified or set explicitly to 24, this would result in 256 /24 networks of the form 10.10.X.0/24.

The subnet range comes from the --default-addr-pool option (such as 10.10.0.0/16). The /16 there defines the size of the overall pool; together with the subnet mask length, it determines how many overlay subnets Docker can carve out of that pool. The --default-addr-pool option may occur multiple times, with each option providing additional address ranges for Docker to use for overlay subnets.

The format of the command is:

$ docker swarm init --default-addr-pool <IP range in CIDR> [--default-addr-pool <IP range in CIDR> --default-addr-pool-mask-length <CIDR value>]
+

Creating a default IP address pool with a /16 (class B) for the 10.20.0.0 network looks like this:

$ docker swarm init --default-addr-pool 10.20.0.0/16
+

Creating a default IP address pool with a /16 (class B) for each of the 10.20.0.0 and 10.30.0.0 networks, with a subnet mask of /26 for each allocated subnet, looks like this:

$ docker swarm init --default-addr-pool 10.20.0.0/16 --default-addr-pool 10.30.0.0/16 --default-addr-pool-mask-length 26
+

In this example, docker network create -d overlay net1 will result in 10.20.0.0/26 as the allocated subnet for net1, and docker network create -d overlay net2 will result in 10.20.0.64/26 as the allocated subnet for net2. This continues until all the subnets are exhausted.
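As a quick check of the arithmetic in this example, each /16 pool contains

    2^(26 - 16) = 1024

possible /26 subnets, so the two pools together allow up to 2048 overlay networks before the address space is exhausted.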

Refer to the following pages for more information:

Configure the advertise address

Manager nodes use an advertise address to allow other nodes in the swarm access to the Swarmkit API and overlay networking. The other nodes on the swarm must be able to access the manager node on its advertise address.

If you don’t specify an advertise address, Docker checks if the system has a single IP address. If so, Docker uses the IP address with the listening port 2377 by default. If the system has multiple IP addresses, you must specify the correct --advertise-addr to enable inter-manager communication and overlay networking:

$ docker swarm init --advertise-addr <MANAGER-IP>
+

You must also specify the --advertise-addr if the address where other nodes reach the first manager node is not the same address the manager sees as its own. For instance, in a cloud setup that spans different regions, hosts have both internal addresses for access within the region and external addresses that you use for access from outside that region. In this case, specify the external address with --advertise-addr so that the node can propagate that information to other nodes that subsequently connect to it.
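For example, a minimal sketch using hypothetical addresses, where 203.0.113.10 is the externally reachable address and 10.0.0.5 is the interface the node actually binds to (the optional --listen-addr flag defaults to 0.0.0.0:2377):

$ docker swarm init \
  --advertise-addr 203.0.113.10 \
  --listen-addr 10.0.0.5:2377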

Refer to the docker swarm init CLI reference for more detail on the advertise address.

View the join command or update a swarm join token

Nodes require a secret token to join the swarm. The token for worker nodes is different from the token for manager nodes. Nodes only use the join-token at the moment they join the swarm. Rotating the join token after a node has already joined a swarm does not affect the node’s swarm membership. Token rotation ensures an old token cannot be used by any new nodes attempting to join the swarm.

To retrieve the join command including the join token for worker nodes, run:

$ docker swarm join-token worker
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
+    192.168.99.100:2377
+
+This node joined a swarm as a worker.
+

To view the join command and token for manager nodes, run:

$ docker swarm join-token manager
+
+To add a manager to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-59egwe8qangbzbqb3ryawxzk3jn97ifahlsrw01yar60pmkr90-bdjfnkcflhooyafetgjod97sz \
+    192.168.99.100:2377
+

Pass the --quiet flag to print only the token:

$ docker swarm join-token --quiet worker
+
+SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c
+

Be careful with the join tokens because they are the secrets necessary to join the swarm. In particular, checking a secret into version control is a bad practice because it would allow anyone with access to the application source code to add new nodes to the swarm. Manager tokens are especially sensitive because they allow a new manager node to join and gain control over the whole swarm.

We recommend that you rotate the join tokens in the following circumstances:

  • when a token has been checked into a version control system, shared in a group chat, or accidentally printed to your logs
  • if you suspect a node has been compromised
  • if you want to guarantee that no new nodes can join the swarm

Additionally, it is a best practice to implement a regular rotation schedule for any secret including swarm join tokens. We recommend that you rotate your tokens at least every 6 months.

Run swarm join-token --rotate to invalidate the old token and generate a new token. Specify whether you want to rotate the token for worker or manager nodes:

$ docker swarm join-token --rotate worker
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-2kscvs0zuymrsc9t0ocyy1rdns9dhaodvpl639j2bqx55uptag-ebmn5u927reawo27s3azntd44 \
+    192.168.99.100:2377
+

Learn more

+

guide, swarm mode, node

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-mode/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fadd-nodes%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fadd-nodes%2Findex.html new file mode 100644 index 00000000..1d53eff2 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fadd-nodes%2Findex.html @@ -0,0 +1,32 @@ +

Add nodes to the swarm

+ +

Once you’ve created a swarm with a manager node, you’re ready to add worker nodes.

  1. Open a terminal and ssh into the machine where you want to run a worker node. This tutorial uses the name worker1.

  2. Run the command produced by the docker swarm init output from the Create a swarm tutorial step to create a worker node joined to the existing swarm:

    $ docker swarm join \
    +  --token  SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
    +  192.168.99.100:2377
    +
    +This node joined a swarm as a worker.
    +

    If you don’t have the command available, you can run the following command on a manager node to retrieve the join command for a worker:

    $ docker swarm join-token worker
    +
    +To add a worker to this swarm, run the following command:
    +
    +    docker swarm join \
    +    --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
    +    192.168.99.100:2377
    +
  3. Open a terminal and ssh into the machine where you want to run a second worker node. This tutorial uses the name worker2.

  4. Run the command produced by the docker swarm init output from the Create a swarm tutorial step to create a second worker node joined to the existing swarm:

    $ docker swarm join \
    +  --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
    +  192.168.99.100:2377
    +
    +This node joined a swarm as a worker.
    +
  5. Open a terminal and ssh into the machine where the manager node runs and run the docker node ls command to see the worker nodes:

    $ docker node ls
    +ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
    +03g1y59jwfg7cf99w4lt0f662    worker2   Ready   Active
    +9j68exjopxe7wfl6yuxml7a7j    worker1   Ready   Active
    +dxn1zf6l61qsb1josjja83ngz *  manager1  Ready   Active        Leader
    +

    The MANAGER STATUS column identifies the manager nodes in the swarm. The empty status in this column for worker1 and worker2 identifies them as worker nodes.

    Swarm management commands like docker node ls only work on manager nodes.
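    If you run such a command on a worker by mistake, the CLI refuses with an error along these lines (exact wording varies by version):

    $ docker node ls
    Error response from daemon: This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.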

What’s next?

Now your swarm consists of a manager and two worker nodes. In the next step of the tutorial, you deploy a service to the swarm.

+

tutorial, cluster management, swarm

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/add-nodes/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fcreate-swarm%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fcreate-swarm%2Findex.html new file mode 100644 index 00000000..fb108639 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fcreate-swarm%2Findex.html @@ -0,0 +1,40 @@ +

Create a swarm

+ +

After you complete the tutorial setup steps, you’re ready to create a swarm. Make sure the Docker Engine daemon is started on the host machines.

  1. Open a terminal and ssh into the machine where you want to run your manager node. This tutorial uses a machine named manager1. If you use Docker Machine, you can connect to it via SSH using the following command:

    $ docker-machine ssh manager1
    +
  2. Run the following command to create a new swarm:

    $ docker swarm init --advertise-addr <MANAGER-IP>
    +

    Note: If you are using Docker Desktop for Mac or Docker Desktop for Windows to test single-node swarm, simply run docker swarm init with no arguments. There is no need to specify --advertise-addr in this case. To learn more, see the topic on how to Use Docker Desktop for Mac or Docker Desktop for Windows with Swarm.

    In the tutorial, the following command creates a swarm on the manager1 machine:

    $ docker swarm init --advertise-addr 192.168.99.100
    +Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
    +
    +To add a worker to this swarm, run the following command:
    +
    +    docker swarm join \
    +    --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
    +    192.168.99.100:2377
    +
    +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
    +

    The --advertise-addr flag configures the manager node to publish its address as 192.168.99.100. The other nodes in the swarm must be able to access the manager at the IP address.

    The output includes the commands to join new nodes to the swarm. Nodes will join as managers or workers depending on the value for the --token flag.

  3. Run docker info to view the current state of the swarm:

    $ docker info
    +
    +Containers: 2
    +Running: 0
    +Paused: 0
    +Stopped: 2
    +  ...snip...
    +Swarm: active
    +  NodeID: dxn1zf6l61qsb1josjja83ngz
    +  Is Manager: true
    +  Managers: 1
    +  Nodes: 1
    +  ...snip...
    +
  4. Run the docker node ls command to view information about nodes:

    $ docker node ls
    +
    +ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
    +dxn1zf6l61qsb1josjja83ngz *  manager1  Ready   Active        Leader
    +
    +

    The * next to the node ID indicates that you’re currently connected on this node.

    Docker Engine swarm mode automatically names the node for the machine host name. The tutorial covers other columns in later steps.

What’s next?

In the next section of the tutorial, we add two more nodes to the cluster.

+

tutorial, cluster management, swarm mode

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdelete-service%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdelete-service%2Findex.html new file mode 100644 index 00000000..3587719b --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdelete-service%2Findex.html @@ -0,0 +1,27 @@ +

Delete the service running on the swarm

+

The remaining steps in the tutorial don’t use the helloworld service, so now you can delete the service from the swarm.

  1. If you haven’t already, open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named manager1.

  2. Run docker service rm helloworld to remove the helloworld service.

    $ docker service rm helloworld
    +
    +helloworld
    +
  3. Run docker service inspect <SERVICE-ID> to verify that the swarm manager removed the service. The CLI returns a message that the service is not found:

    $ docker service inspect helloworld
    +[]
    +Error: no such service: helloworld
    +
  4. Even though the service no longer exists, the task containers take a few seconds to clean up. You can use docker ps on the nodes to verify when the tasks have been removed.

    $ docker ps
    +
    +CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS     NAMES
    +db1651f50347        alpine:latest       "ping docker.com"        44 minutes ago      Up 46 seconds                 helloworld.5.9lkmos2beppihw95vdwxy1j3w
    +43bf6e532a92        alpine:latest       "ping docker.com"        44 minutes ago      Up 46 seconds                 helloworld.3.a71i8rp6fua79ad43ycocl4t2
    +5a0fb65d8fa7        alpine:latest       "ping docker.com"        44 minutes ago      Up 45 seconds                 helloworld.2.2jpgensh7d935qdc857pxulfr
    +afb0ba67076f        alpine:latest       "ping docker.com"        44 minutes ago      Up 46 seconds                 helloworld.4.1c47o7tluz7drve4vkm2m5olx
    +688172d3bfaa        alpine:latest       "ping docker.com"        45 minutes ago      Up About a minute             helloworld.1.74nbhb3fhud8jfrhigd7s29we
    +
    +$ docker ps
    +CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS     NAMES
    +
    +

What’s next?

In the next step of the tutorial, you set up a new service and apply a rolling update.

+

tutorial, cluster management, swarm, service

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/delete-service/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdeploy-service%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdeploy-service%2Findex.html new file mode 100644 index 00000000..38aad034 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdeploy-service%2Findex.html @@ -0,0 +1,16 @@ +

Deploy a service to the swarm

+

After you create a swarm, you can deploy a service to the swarm. For this tutorial, you also added worker nodes, but that is not a requirement to deploy a service.

  1. Open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named manager1.

  2. Run the following command:

    $ docker service create --replicas 1 --name helloworld alpine ping docker.com
    +
    +9uk4639qpg7npwf3fn2aasksr
    +
    • The docker service create command creates the service.
    • The --name flag names the service helloworld.
    • The --replicas flag specifies the desired state of 1 running instance.
    • The arguments alpine ping docker.com define the service as an Alpine Linux container that executes the command ping docker.com.
  3. Run docker service ls to see the list of running services:

    $ docker service ls
    +
    +ID            NAME        SCALE  IMAGE   COMMAND
    +9uk4639qpg7n  helloworld  1/1    alpine  ping docker.com
    +

What’s next?

Now you’ve deployed a service to the swarm, you’re ready to inspect the service.

+

tutorial, cluster management, swarm mode

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/deploy-service/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdrain-node%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdrain-node%2Findex.html new file mode 100644 index 00000000..965efcb0 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fdrain-node%2Findex.html @@ -0,0 +1,54 @@ +

Drain a node on the swarm

+ +

In earlier steps of the tutorial, all the nodes have been running with ACTIVE availability. The swarm manager can assign tasks to any ACTIVE node, so up to now all nodes have been available to receive tasks.

Sometimes, such as planned maintenance times, you need to set a node to DRAIN availability. DRAIN availability prevents a node from receiving new tasks from the swarm manager. It also means the manager stops tasks running on the node and launches replica tasks on a node with ACTIVE availability.

Important: Setting a node to DRAIN does not remove standalone containers from that node, such as those created with docker run, docker-compose up, or the Docker Engine API. A node’s status, including DRAIN, only affects the node’s ability to schedule swarm service workloads.

  1. If you haven’t already, open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named manager1.

  2. Verify that all your nodes are actively available.

    $ docker node ls
    +
    +ID                           HOSTNAME  STATUS  AVAILABILITY  MANAGER STATUS
    +1bcef6utixb0l0ca7gxuivsj0    worker2   Ready   Active
    +38ciaotwjuritcdtn9npbnkuz    worker1   Ready   Active
    +e216jshn25ckzbvmwlnh5jr3g *  manager1  Ready   Active        Leader
    +
  3. If you aren’t still running the redis service from the rolling update tutorial, start it now:

    $ docker service create --replicas 3 --name redis --update-delay 10s redis:3.0.6
    +
    +c5uo6kdmzpon37mgj9mwglcfw
    +
  4. Run docker service ps redis to see how the swarm manager assigned the tasks to different nodes:

    $ docker service ps redis
    +
    +NAME                               IMAGE        NODE     DESIRED STATE  CURRENT STATE
    +redis.1.7q92v0nr1hcgts2amcjyqg3pq  redis:3.0.6  manager1 Running        Running 26 seconds
    +redis.2.7h2l8h3q3wqy5f66hlv9ddmi6  redis:3.0.6  worker1  Running        Running 26 seconds
    +redis.3.9bg7cezvedmkgg6c8yzvbhwsd  redis:3.0.6  worker2  Running        Running 26 seconds
    +

    In this case the swarm manager distributed one task to each node. You may see the tasks distributed differently among the nodes in your environment.

  5. Run docker node update --availability drain <NODE-ID> to drain a node that had a task assigned to it:

    $ docker node update --availability drain worker1
    +
    +worker1
    +
  6. Inspect the node to check its availability:

    $ docker node inspect --pretty worker1
    +
    +ID:			38ciaotwjuritcdtn9npbnkuz
    +Hostname:		worker1
    +Status:
    + State:			Ready
    + Availability:		Drain
    +...snip...
    +

    The drained node shows Drain for AVAILABILITY.

  7. Run docker service ps redis to see how the swarm manager updated the task assignments for the redis service:

    $ docker service ps redis
    +
    +NAME                                    IMAGE        NODE      DESIRED STATE  CURRENT STATE           ERROR
    +redis.1.7q92v0nr1hcgts2amcjyqg3pq       redis:3.0.6  manager1  Running        Running 4 minutes
    +redis.2.b4hovzed7id8irg1to42egue8       redis:3.0.6  worker2   Running        Running About a minute
    + \_ redis.2.7h2l8h3q3wqy5f66hlv9ddmi6   redis:3.0.6  worker1   Shutdown       Shutdown 2 minutes ago
    +redis.3.9bg7cezvedmkgg6c8yzvbhwsd       redis:3.0.6  worker2   Running        Running 4 minutes
    +

    The swarm manager maintains the desired state by ending the task on a node with Drain availability and creating a new task on a node with Active availability.

  8. Run docker node update --availability active <NODE-ID> to return the drained node to an active state:

    $ docker node update --availability active worker1
    +
    +worker1
    +
  9. Inspect the node to see the updated state:

    $ docker node inspect --pretty worker1
    +
    +ID:			38ciaotwjuritcdtn9npbnkuz
    +Hostname:		worker1
    +Status:
    + State:			Ready
    + Availability:		Active
    +...snip...
    +

    When you set the node back to Active availability, it can receive new tasks:

    • during a service update to scale up
    • during a rolling update
    • when you set another node to Drain availability
    • when a task fails on another active node

What’s next?

Learn how to use a swarm mode routing mesh.

+

tutorial, cluster management, swarm, service, drain

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/drain-node/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Findex.html new file mode 100644 index 00000000..5bde29d4 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Findex.html @@ -0,0 +1,13 @@ +

Getting started with swarm mode

+ +

This tutorial introduces you to the features of Docker Engine Swarm mode. You may want to familiarize yourself with the key concepts before you begin.

The tutorial guides you through the following activities:

  • initializing a cluster of Docker Engines in swarm mode
  • adding nodes to the swarm
  • deploying application services to the swarm
  • managing the swarm once you have everything running

This tutorial uses Docker Engine CLI commands entered on the command line of a terminal window.

If you are brand new to Docker, see About Docker Engine.

Set up

To run this tutorial, you need the following:

Three networked host machines

This tutorial requires three Linux hosts which have Docker installed and can communicate over a network. These can be physical machines, virtual machines, Amazon EC2 instances, or hosted in some other way. Check out Getting started - Swarms for one possible set-up for the hosts.

One of these machines is a manager (called manager1) and two of them are workers (worker1 and worker2).

Note: You can follow many of the tutorial steps to test single-node swarm as well, in which case you need only one host. Multi-node commands do not work, but you can initialize a swarm, create services, and scale them.

Install Docker Engine on Linux machines

If you are using Linux based physical computers or cloud-provided computers as hosts, simply follow the Linux install instructions for your platform. Spin up the three machines, and you are ready. You can test both single-node and multi-node swarm scenarios on Linux machines.

Use Docker Desktop for Mac or Docker Desktop for Windows

Alternatively, install the latest Docker Desktop for Mac or Docker Desktop for Windows application on one computer. You can test both single-node and multi-node swarm from this computer.

The IP address of the manager machine

The IP address must be assigned to a network interface available to the host operating system. All nodes in the swarm need to connect to the manager at the IP address.

Because other nodes contact the manager node on its IP address, you should use a fixed IP address.

You can run ifconfig on Linux or macOS to see a list of the available network interfaces.

The tutorial uses manager1: 192.168.99.100.

Open protocols and ports between the hosts

The following ports must be available. On some systems, these ports are open by default.

  • TCP port 2377 for cluster management communications
  • TCP and UDP port 7946 for communication among nodes
  • UDP port 4789 for overlay network traffic

If you plan on creating an overlay network with encryption (--opt encrypted), you also need to ensure ip protocol 50 (ESP) traffic is allowed.
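How you open these ports depends on your platform. As one possible sketch, on a host that uses ufw (an assumption; adjust for your firewall of choice):

$ sudo ufw allow 2377/tcp
$ sudo ufw allow 7946/tcp
$ sudo ufw allow 7946/udp
$ sudo ufw allow 4789/udp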

What’s next?

After you have set up your environment, you are ready to create a swarm.

+

tutorial, cluster management, swarm mode

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Finspect-service%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Finspect-service%2Findex.html new file mode 100644 index 00000000..47a91000 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Finspect-service%2Findex.html @@ -0,0 +1,78 @@ +

Inspect a service on the swarm

+ +

When you have deployed a service to your swarm, you can use the Docker CLI to see details about the service running in the swarm.

  1. If you haven’t already, open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named manager1.

  2. Run docker service inspect --pretty <SERVICE-ID> to display the details about a service in an easily readable format.

    To see the details on the helloworld service:

    [manager1]$ docker service inspect --pretty helloworld
    +
    +ID:		9uk4639qpg7npwf3fn2aasksr
    +Name:		helloworld
    +Service Mode:	REPLICATED
    + Replicas:		1
    +Placement:
    +UpdateConfig:
    + Parallelism:	1
    +ContainerSpec:
    + Image:		alpine
    + Args:	ping docker.com
    +Resources:
    +Endpoint Mode:  vip
    +

    Tip: To return the service details in json format, run the same command without the --pretty flag.

    [manager1]$ docker service inspect helloworld
    +[
    +{
    +    "ID": "9uk4639qpg7npwf3fn2aasksr",
    +    "Version": {
    +        "Index": 418
    +    },
    +    "CreatedAt": "2016-06-16T21:57:11.622222327Z",
    +    "UpdatedAt": "2016-06-16T21:57:11.622222327Z",
    +    "Spec": {
    +        "Name": "helloworld",
    +        "TaskTemplate": {
    +            "ContainerSpec": {
    +                "Image": "alpine",
    +                "Args": [
    +                    "ping",
    +                    "docker.com"
    +                ]
    +            },
    +            "Resources": {
    +                "Limits": {},
    +                "Reservations": {}
    +            },
    +            "RestartPolicy": {
    +                "Condition": "any",
    +                "MaxAttempts": 0
    +            },
    +            "Placement": {}
    +        },
    +        "Mode": {
    +            "Replicated": {
    +                "Replicas": 1
    +            }
    +        },
    +        "UpdateConfig": {
    +            "Parallelism": 1
    +        },
    +        "EndpointSpec": {
    +            "Mode": "vip"
    +        }
    +    },
    +    "Endpoint": {
    +        "Spec": {}
    +    }
    +}
    +]
    +
  3. Run docker service ps <SERVICE-ID> to see which nodes are running the service:

    [manager1]$ docker service ps helloworld
    +
    +NAME                                    IMAGE   NODE     DESIRED STATE  CURRENT STATE           ERROR               PORTS
    +helloworld.1.8p1vev3fq5zm0mi8g0as41w35  alpine  worker2  Running        Running 3 minutes
    +

    In this case, the one instance of the helloworld service is running on the worker2 node. You may see the service running on your manager node. By default, manager nodes in a swarm can execute tasks just like worker nodes.

    Swarm also shows you the DESIRED STATE and CURRENT STATE of the service task so you can see if tasks are running according to the service definition.

  4. Run docker ps on the node where the task is running to see details about the container for the task.

    Tip: If helloworld is running on a node other than your manager node, you must ssh to that node.

    [worker2]$ docker ps
    +
    +CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
    +e609dde94e47        alpine:latest       "ping docker.com"   3 minutes ago       Up 3 minutes                            helloworld.1.8p1vev3fq5zm0mi8g0as41w35
    +

What’s next?

Next, you can change the scale for the service running in the swarm.

+

tutorial, cluster management, swarm mode

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/inspect-service/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Frolling-update%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Frolling-update%2Findex.html new file mode 100644 index 00000000..3091ea58 --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Frolling-update%2Findex.html @@ -0,0 +1,69 @@ +

Apply rolling updates to a service

+ +

In a previous step of the tutorial, you scaled the number of instances of a service. In this part of the tutorial, you deploy a service based on the Redis 3.0.6 container tag. Then you upgrade the service to use the Redis 3.0.7 container image using rolling updates.

  1. If you haven’t already, open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named manager1.

  2. Deploy your Redis tag to the swarm and configure the swarm with a 10 second update delay. Note that the following example shows an older Redis tag:

    $ docker service create \
    +  --replicas 3 \
    +  --name redis \
    +  --update-delay 10s \
    +  redis:3.0.6
    +
    +0u6a4s31ybk7yw2wyvtikmu50
    +

    You configure the rolling update policy at service deployment time.

    The --update-delay flag configures the time delay between updates to a service task or sets of tasks. You can describe the delay as a combination of seconds (s), minutes (m), and hours (h), so 10m30s indicates a 10 minute 30 second delay.

    By default the scheduler updates 1 task at a time. You can pass the --update-parallelism flag to configure the maximum number of service tasks that the scheduler updates simultaneously.

    By default, when an update to an individual task returns a state of RUNNING, the scheduler schedules another task to update until all tasks are updated. If, at any time during an update, a task returns FAILED, the scheduler pauses the update. You can control the behavior using the --update-failure-action flag for docker service create or docker service update. A combined sketch of these flags appears after this list.

  3. Inspect the redis service:

    $ docker service inspect --pretty redis
    +
    +ID:             0u6a4s31ybk7yw2wyvtikmu50
    +Name:           redis
    +Service Mode:   Replicated
    + Replicas:      3
    +Placement:
    + Strategy:	    Spread
    +UpdateConfig:
    + Parallelism:   1
    + Delay:         10s
    +ContainerSpec:
    + Image:         redis:3.0.6
    +Resources:
    +Endpoint Mode:  vip
    +
  4. Now you can update the container image for redis. The swarm manager applies the update to nodes according to the UpdateConfig policy:

    $ docker service update --image redis:3.0.7 redis
    +redis
    +

    The scheduler applies rolling updates as follows by default:

    • Stop the first task.
    • Schedule update for the stopped task.
    • Start the container for the updated task.
    • If the update to a task returns RUNNING, wait for the specified delay period then start the next task.
    • If, at any time during the update, a task returns FAILED, pause the update.
  5. Run docker service inspect --pretty redis to see the new image in the desired state:

    $ docker service inspect --pretty redis
    +
    +ID:             0u6a4s31ybk7yw2wyvtikmu50
    +Name:           redis
    +Service Mode:   Replicated
    + Replicas:      3
    +Placement:
    + Strategy:	    Spread
    +UpdateConfig:
    + Parallelism:   1
    + Delay:         10s
    +ContainerSpec:
    + Image:         redis:3.0.7
    +Resources:
    +Endpoint Mode:  vip
    +

    The output of service inspect shows if your update paused due to failure:

    $ docker service inspect --pretty redis
    +
    +ID:             0u6a4s31ybk7yw2wyvtikmu50
    +Name:           redis
    +...snip...
    +Update status:
    + State:      paused
    + Started:    11 seconds ago
    + Message:    update paused due to failure or early termination of task 9p7ith557h8ndf0ui9s0q951b
    +...snip...
    +

    To restart a paused update run docker service update <SERVICE-ID>. For example:

    $ docker service update redis
    +

    To avoid repeating certain update failures, you may need to reconfigure the service by passing flags to docker service update.

  6. Run docker service ps <SERVICE-ID> to watch the rolling update:

    $ docker service ps redis
    +
    +NAME                                   IMAGE        NODE       DESIRED STATE  CURRENT STATE            ERROR
    +redis.1.dos1zffgeofhagnve8w864fco      redis:3.0.7  worker1    Running        Running 37 seconds
    + \_ redis.1.88rdo6pa52ki8oqx6dogf04fh  redis:3.0.6  worker2    Shutdown       Shutdown 56 seconds ago
    +redis.2.9l3i4j85517skba5o7tn5m8g0      redis:3.0.7  worker2    Running        Running About a minute
    + \_ redis.2.66k185wilg8ele7ntu8f6nj6i  redis:3.0.6  worker1    Shutdown       Shutdown 2 minutes ago
    +redis.3.egiuiqpzrdbxks3wxgn8qib1g      redis:3.0.7  worker1    Running        Running 48 seconds
     + \_ redis.3.ctzktfddb2tepkr45qcmqln04  redis:3.0.6  manager1   Shutdown       Shutdown 2 minutes ago
    +

    Before Swarm updates all of the tasks, you can see that some are running redis:3.0.6 while others are running redis:3.0.7. The output above shows the state once the rolling updates are done.
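    As referenced in step 2, here is a minimal sketch (with hypothetical values) of tuning the update policy while rolling out a new image in a single command:

    $ docker service update \
      --update-parallelism 2 \
      --update-delay 10s \
      --update-failure-action rollback \
      --image redis:3.0.7 \
      redis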

What’s next?

Next, learn about how to drain a node in the swarm.

+

tutorial, cluster management, swarm, service, rolling-update

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fscale-service%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fscale-service%2Findex.html new file mode 100644 index 00000000..5270a03c --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm-tutorial%2Fscale-service%2Findex.html @@ -0,0 +1,25 @@ +

Scale the service in the swarm

+

Once you have deployed a service to a swarm, you are ready to use the Docker CLI to scale the number of containers in the service. Containers running in a service are called “tasks.”

  1. If you haven’t already, open a terminal and ssh into the machine where you run your manager node. For example, the tutorial uses a machine named manager1.

  2. Run the following command to change the desired state of the service running in the swarm:

    $ docker service scale <SERVICE-ID>=<NUMBER-OF-TASKS>
    +

    For example:

    $ docker service scale helloworld=5
    +
    +helloworld scaled to 5
    +
  3. Run docker service ps <SERVICE-ID> to see the updated task list:

    $ docker service ps helloworld
    +
    +NAME                                    IMAGE   NODE      DESIRED STATE  CURRENT STATE
    +helloworld.1.8p1vev3fq5zm0mi8g0as41w35  alpine  worker2   Running        Running 7 minutes
    +helloworld.2.c7a7tcdq5s0uk3qr88mf8xco6  alpine  worker1   Running        Running 24 seconds
    +helloworld.3.6crl09vdcalvtfehfh69ogfb1  alpine  worker1   Running        Running 24 seconds
    +helloworld.4.auky6trawmdlcne8ad8phb0f1  alpine  manager1  Running        Running 24 seconds
    +helloworld.5.ba19kca06l18zujfwxyc5lkyn  alpine  worker2   Running        Running 24 seconds
    +

    You can see that swarm has created 4 new tasks to scale to a total of 5 running instances of Alpine Linux. The tasks are distributed between the three nodes of the swarm. One is running on manager1.

  4. Run docker ps to see the containers running on the node where you’re connected. The following example shows the tasks running on manager1:

    $ docker ps
    +
    +CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
    +528d68040f95        alpine:latest       "ping docker.com"   About a minute ago   Up About a minute                       helloworld.4.auky6trawmdlcne8ad8phb0f1
    +

    If you want to see the containers running on other nodes, ssh into those nodes and run the docker ps command.

What’s next?

At this point in the tutorial, you’re finished with the helloworld service. The next step shows how to delete the service.

+

tutorial, cluster management, swarm mode, scale

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm-tutorial/scale-service/ +

+
diff --git a/devdocs/docker/engine%2Fswarm%2Fswarm_manager_locking%2Findex.html b/devdocs/docker/engine%2Fswarm%2Fswarm_manager_locking%2Findex.html new file mode 100644 index 00000000..112f57ad --- /dev/null +++ b/devdocs/docker/engine%2Fswarm%2Fswarm_manager_locking%2Findex.html @@ -0,0 +1,65 @@ +

Lock your swarm to protect its encryption key

+ +

The Raft logs used by swarm managers are encrypted on disk by default. This at-rest encryption protects your service’s configuration and data from attackers who gain access to the encrypted Raft logs. One of the reasons this feature was introduced was in support of the Docker secrets feature.

When Docker restarts, both the TLS key used to encrypt communication among swarm nodes, and the key used to encrypt and decrypt Raft logs on disk, are loaded into each manager node’s memory. Docker has the ability to protect the mutual TLS encryption key and the key used to encrypt and decrypt Raft logs at rest, by allowing you to take ownership of these keys and to require manual unlocking of your managers. This feature is called autolock.

When Docker restarts, you must unlock the swarm first, using a key encryption key generated by Docker when the swarm was locked. You can rotate this key encryption key at any time.

Note: You don’t need to unlock the swarm when a new node joins the swarm, because the key is propagated to it over mutual TLS.

Initialize a swarm with autolocking enabled

When you initialize a new swarm, you can use the --autolock flag to enable autolocking of swarm manager nodes when Docker restarts.

$ docker swarm init --autolock
+
+Swarm initialized: current node (k1q27tfyx9rncpixhk69sa61v) is now a manager.
+
+To add a worker to this swarm, run the following command:
+
+    docker swarm join \
+    --token SWMTKN-1-0j52ln6hxjpxk2wgk917abcnxywj3xed0y8vi1e5m9t3uttrtu-7bnxvvlz2mrcpfonjuztmtts9 \
+    172.31.46.109:2377
+
+To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
+
+To unlock a swarm manager after it restarts, run the `docker swarm unlock`
+command and provide the following key:
+
+    SWMKEY-1-WuYH/IX284+lRcXuoVf38viIDK3HJEKY13MIHX+tTt8
+

Store the key in a safe place, such as in a password manager.

When Docker restarts, you need to unlock the swarm. A locked swarm causes an error like the following when you try to start or restart a service:

$ sudo service docker restart
+
+$ docker service ls
+
+Error response from daemon: Swarm is encrypted and needs to be unlocked before it can be used. Use "docker swarm unlock" to unlock it.
+

Enable or disable autolock on an existing swarm

To enable autolock on an existing swarm, set the autolock flag to true.

$ docker swarm update --autolock=true
+
+Swarm updated.
+To unlock a swarm manager after it restarts, run the `docker swarm unlock`
+command and provide the following key:
+
+    SWMKEY-1-+MrE8NgAyKj5r3NcR4FiQMdgu+7W72urH0EZeSmP/0Y
+
+Please remember to store this key in a password manager, since without it you
+will not be able to restart the manager.
+

To disable autolock, set --autolock to false. The mutual TLS key and the encryption key used to read and write Raft logs are stored unencrypted on disk. There is a trade-off between the risk of storing the encryption key unencrypted at rest and the convenience of restarting a swarm without needing to unlock each manager.

$ docker swarm update --autolock=false
+

Keep the unlock key around for a short time after disabling autolocking, in case a manager goes down while it is still configured to lock using the old key.

Unlock a swarm

To unlock a locked swarm, use docker swarm unlock.

$ docker swarm unlock
+
+Please enter unlock key:
+

Enter the encryption key that was generated and shown in the command output when you locked the swarm or rotated the key, and the swarm unlocks.

View the current unlock key for a running swarm

Consider a situation where your swarm is running as expected, then a manager node becomes unavailable. You troubleshoot the problem and bring the physical node back online, but you need to unlock the manager by providing the unlock key to read the encrypted credentials and Raft logs.

If the key has not been rotated since the node left the swarm, and you have a quorum of functional manager nodes in the swarm, you can view the current unlock key using docker swarm unlock-key without any arguments.

$ docker swarm unlock-key
+
+To unlock a swarm manager after it restarts, run the `docker swarm unlock`
+command and provide the following key:
+
+    SWMKEY-1-8jDgbUNlJtUe5P/lcr9IXGVxqZpZUXPzd+qzcGp4ZYA
+
+Please remember to store this key in a password manager, since without it you
+will not be able to restart the manager.
+

If the key was rotated after the swarm node became unavailable and you do not have a record of the previous key, you may need to force the manager to leave the swarm and join it back to the swarm as a new manager.

Rotate the unlock key

You should rotate the locked swarm’s unlock key on a regular schedule.

$ docker swarm unlock-key --rotate
+
+Successfully rotated manager unlock key.
+
+To unlock a swarm manager after it restarts, run the `docker swarm unlock`
+command and provide the following key:
+
+    SWMKEY-1-8jDgbUNlJtUe5P/lcr9IXGVxqZpZUXPzd+qzcGp4ZYA
+
+Please remember to store this key in a password manager, since without it you
+will not be able to restart the manager.
+

Warning: When you rotate the unlock key, keep a record of the old key around for a few minutes, so that if a manager goes down before it gets the new key, it may still be unlocked with the old one.

+

swarm, manager, lock, unlock, autolock, encryption

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/engine/swarm/swarm_manager_locking/ +

+
diff --git a/devdocs/docker/get-started%2F02_our_app%2Findex.html b/devdocs/docker/get-started%2F02_our_app%2Findex.html new file mode 100644 index 00000000..f2d9b67f --- /dev/null +++ b/devdocs/docker/get-started%2F02_our_app%2Findex.html @@ -0,0 +1,20 @@ +

Sample application

+ +

For the rest of this tutorial, we will be working with a simple todo list manager that is running in Node.js. If you’re not familiar with Node.js, don’t worry. No real JavaScript experience is needed.

At this point, your development team is quite small and you’re simply building an app to prove out your MVP (minimum viable product). You want to show how it works and what it’s capable of doing without needing to think about how it will work for a large team, multiple developers, etc.

Todo List Manager Screenshot

Get the app

Before we can run the application, we need to get the application source code onto our machine. For real projects, you will typically clone the repo. But, for this tutorial, we have created a ZIP file containing the application.

  1. Download the App contents. You can either pull the entire project or download it as a ZIP file and extract the app folder to get started.

  2. Once extracted, use your favorite code editor to open the project. If you’re in need of an editor, you can use Visual Studio Code. You should see the package.json and two subdirectories (src and spec).

    Screenshot of Visual Studio Code opened with the app loaded

Build the app’s container image

In order to build the application, we need to use a Dockerfile. A Dockerfile is simply a text-based script of instructions that is used to create a container image. If you’ve created Dockerfiles before, you might see a few flaws in the Dockerfile below. But, don’t worry. We’ll go over them.

  1. Create a file named Dockerfile in the same folder as the file package.json with the following contents.

    # syntax=docker/dockerfile:1
    +FROM node:12-alpine
    +RUN apk add --no-cache python2 g++ make
    +WORKDIR /app
    +COPY . .
    +RUN yarn install --production
    +CMD ["node", "src/index.js"]
    +EXPOSE 3000
    +

    Please check that the file Dockerfile has no file extension like .txt. Some editors may append this file extension automatically and this would result in an error in the next step.

  2. If you haven’t already done so, open a terminal and go to the app directory with the Dockerfile. Now build the container image using the docker build command.

    $ docker build -t getting-started .

    This command used the Dockerfile to build a new container image. You might have noticed that a lot of “layers” were downloaded. This is because we instructed the builder that we wanted to start from the node:12-alpine image. But, since we didn’t have that on our machine, that image needed to be downloaded.

    After the image was downloaded, we copied in our application and used yarn to install our application’s dependencies. The CMD directive specifies the default command to run when starting a container from this image.

    Finally, the -t flag tags our image. Think of this simply as a human-readable name for the final image. Since we named the image getting-started, we can refer to that image when we run a container.

    The . at the end of the docker build command tells Docker that it should look for the Dockerfile in the current directory.
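
    As a side note, if your Dockerfile has a different name or lives in another directory, you can point the builder at it with the -f (--file) flag; a quick sketch, where docker/Dockerfile is only an example path:

    $ docker build -t getting-started -f docker/Dockerfile .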

Start an app container

Now that we have an image, let’s run the application. To do so, we will use the docker run command (remember that from earlier?).

  1. Start your container using the docker run command and specify the name of the image we just created:

    $ docker run -dp 3000:3000 getting-started

    Remember the -d and -p flags? We’re running the new container in “detached” mode (in the background) and mapping the host’s port 3000 to the container’s port 3000. Without the port mapping, we wouldn’t be able to access the application.

  2. After a few seconds, open your web browser to http://localhost:3000. You should see our app.

    Empty Todo List

  3. Go ahead and add an item or two and see that it works as you expect. You can mark items as complete and remove items. Your frontend is successfully storing items in the backend. Pretty quick and easy, huh?

At this point, you should have a running todo list manager with a few items, all built by you. Now, let’s make a few changes and learn about managing our containers.

If you take a quick look at the Docker Dashboard, you should see your two containers running now (this tutorial and your freshly launched app container).

Docker Dashboard with tutorial and app containers running
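
You can also check from the command line; names and IDs will differ on your machine:

$ docker ps        # lists running containers with their images and port mappings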

Recap

In this short section, we learned the very basics about building a container image and created a Dockerfile to do so. Once we built an image, we started the container and saw the running app.

Next, we’re going to make a modification to our app and learn how to update our running application with a new image. Along the way, we’ll learn a few other useful commands.

diff --git a/devdocs/docker/get-started%2F04_sharing_app%2Findex.html b/devdocs/docker/get-started%2F04_sharing_app%2Findex.html new file mode 100644 index 00000000..568f66a4 --- /dev/null +++ b/devdocs/docker/get-started%2F04_sharing_app%2Findex.html @@ -0,0 +1,16 @@ +

Share the application


Now that we’ve built an image, let’s share it! To share Docker images, you have to use a Docker registry. The default registry is Docker Hub and is where all of the images we’ve used have come from.

Docker ID

A Docker ID allows you to access Docker Hub, which is the world’s largest library and community for container images. Create a Docker ID for free if you don’t have one.

Create a repo

To push an image, we first need to create a repository on Docker Hub.

  1. Sign up or Sign in to Docker Hub.

  2. Click the Create Repository button.

  3. For the repo name, use getting-started. Make sure the Visibility is Public.

    Private repositories

    Did you know that Docker offers private repositories, which allow you to restrict content to specific users or teams? Check out the details on the Docker pricing page.

  4. Click the Create button!

The image below shows an example Docker command that pushes to this repo.

Docker command with push example

Push the image

  1. In the command line, try running the push command you see on Docker Hub. Note that your command will be using your namespace, not “docker”.

     $ docker push docker/getting-started
     The push refers to repository [docker.io/docker/getting-started]
     An image does not exist locally with the tag: docker/getting-started

    Why did it fail? The push command was looking for an image named docker/getting-started, but didn’t find one. If you run docker image ls, you won’t see one either.

    To fix this, we need to “tag” the image we’ve built to give it another name.

  2. Log in to Docker Hub using the command docker login -u YOUR-USER-NAME.

  3. Use the docker tag command to give the getting-started image a new name. Be sure to swap out YOUR-USER-NAME with your Docker ID.

     $ docker tag getting-started YOUR-USER-NAME/getting-started
  4. Now try your push command again. If you’re copying the value from Docker Hub, you can drop the tagname portion, as we didn’t add a tag to the image name. If you don’t specify a tag, Docker will use a tag called latest.

     $ docker push YOUR-USER-NAME/getting-started
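
    If you prefer an explicit version tag rather than relying on latest, you can include one when tagging and pushing; a small sketch (the v1 tag is only an example):

     $ docker tag getting-started YOUR-USER-NAME/getting-started:v1
     $ docker push YOUR-USER-NAME/getting-started:v1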

Run the image on a new instance

Now that our image has been built and pushed into a registry, let’s try running our app on a brand new instance that has never seen this container image! To do this, we will use Play with Docker.

  1. Open your browser to Play with Docker.

  2. Click Login and then select docker from the drop-down list.

  3. Connect with your Docker Hub account.

  4. Once you’re logged in, click on the ADD NEW INSTANCE option on the left side bar. If you don’t see it, make your browser a little wider. After a few seconds, a terminal window opens in your browser.

    Play with Docker add new instance

  5. In the terminal, start your freshly pushed app.

     $ docker run -dp 3000:3000 YOUR-USER-NAME/getting-started

    You should see the image get pulled down and eventually start up!

  6. Click on the 3000 badge when it comes up and you should see the app with your modifications! Hooray! If the 3000 badge doesn’t show up, you can click on the “Open Port” button and type in 3000.

Recap

In this section, we learned how to share our images by pushing them to a registry. We then went to a brand new instance and were able to run the freshly pushed image. This is quite common in CI pipelines, where the pipeline will create the image and push it to a registry and then the production environment can use the latest version of the image.

Now that we have that figured out, let’s circle back around to what we noticed at the end of the last section. As a reminder, we noticed that when we restarted the app, we lost all of our todo list items. That’s obviously not a great user experience, so let’s learn how we can persist the data across restarts!

diff --git a/devdocs/docker/get-started%2Findex.html b/devdocs/docker/get-started%2Findex.html new file mode 100644 index 00000000..734657fc --- /dev/null +++ b/devdocs/docker/get-started%2Findex.html @@ -0,0 +1,15 @@ +

Orientation and setup


Update to the Docker Desktop terms

Commercial use of Docker Desktop in larger enterprises (more than 250 employees OR more than $10 million USD in annual revenue) now requires a paid subscription.

Welcome! We are excited that you want to learn Docker.

This page contains step-by-step instructions on how to get started with Docker. In this tutorial, you’ll learn how to:

In addition, you’ll also learn about the best practices for building images, including instructions on how to scan your images for security vulnerabilities.

If you are looking for information on how to containerize an application using your favorite language, see Language-specific getting started guides.

We also recommend the video walkthrough from DockerCon 2020.

Download and install Docker

This tutorial assumes you have a current version of Docker installed on your machine. If you do not have Docker installed, choose your preferred operating system below to download Docker:

  • Mac with Intel chip
  • Mac with Apple chip
  • Windows
  • Linux

For Docker Desktop installation instructions, see:

Start the tutorial

If you’ve already run the command to get started with the tutorial, congratulations! If not, open a command prompt or bash window, and run the command:

$ docker run -d -p 80:80 docker/getting-started

You’ll notice a few flags being used. Here’s some more info on them:

  • -d - run the container in detached mode (in the background)
  • -p 80:80 - map port 80 of the host to port 80 in the container
  • docker/getting-started - the image to use

Tip

You can combine single character flags to shorten the full command. As an example, the command above could be written as:

$ docker run -dp 80:80 docker/getting-started

The Docker Dashboard

Before going too far, we want to highlight the Docker Dashboard, which gives you a quick view of the containers running on your machine. The Docker Dashboard is available for Mac and Windows. It gives you quick access to container logs, lets you get a shell inside the container, and lets you easily manage container lifecycle (stop, remove, etc.).

To access the dashboard, follow the instructions in the Docker Desktop manual. If you open the dashboard now, you will see this tutorial running! The container name (jolly_bouman below) is a randomly created name. So, you’ll most likely have a different name.

Tutorial container running in Docker Dashboard
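
If you would rather have a predictable name than a random one like jolly_bouman, you can rename a running container; a small sketch (substitute whatever name your container was given):

$ docker rename jolly_bouman tutorial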

What is a container?

Now that you’ve run a container, what is a container? Simply put, a container is a sandboxed process on your machine that is isolated from all other processes on the host machine. That isolation leverages kernel namespaces and cgroups, features that have been in Linux for a long time. Docker has worked to make these capabilities approachable and easy to use. To summarize, a container is simply an isolated, sandboxed process whose filesystem is provided by a container image (more on images below).

Creating containers from scratch

If you’d like to see how containers are built from scratch, Liz Rice from Aqua Security has a fantastic talk in which she creates a container from scratch in Go. While the talk does not go into networking, using images for the filesystem, and other advanced topics, it gives a fantastic deep dive into how things are working.

What is a container image?

When running a container, it uses an isolated filesystem. This custom filesystem is provided by a container image. Since the image contains the container’s filesystem, it must contain everything needed to run an application - all dependencies, configuration, scripts, binaries, etc. The image also contains other configuration for the container, such as environment variables, a default command to run, and other metadata.

We’ll dive deeper into images later on, covering topics such as layering, best practices, and more.

Info

If you’re familiar with chroot, think of a container as an extended version of chroot. The filesystem is simply coming from the image. But, a container adds additional isolation not available when simply using chroot.

CLI references

Refer to the following topics for further documentation on all CLI commands used in this article:

diff --git a/devdocs/docker/get-started%2Fkube-deploy%2Findex.html b/devdocs/docker/get-started%2Fkube-deploy%2Findex.html new file mode 100644 index 00000000..baa480ba --- /dev/null +++ b/devdocs/docker/get-started%2Fkube-deploy%2Findex.html @@ -0,0 +1,56 @@ +

Deploy to Kubernetes


Prerequisites

Introduction

Now that we’ve demonstrated that the individual components of our application run as stand-alone containers, it’s time to arrange for them to be managed by an orchestrator like Kubernetes. Kubernetes provides many tools for scaling, networking, securing and maintaining your containerized applications, above and beyond the abilities of containers themselves.

In order to validate that our containerized application works well on Kubernetes, we’ll use Docker Desktop’s built in Kubernetes environment right on our development machine to deploy our application, before handing it off to run on a full Kubernetes cluster in production. The Kubernetes environment created by Docker Desktop is fully featured, meaning it has all the Kubernetes features your app will enjoy on a real cluster, accessible from the convenience of your development machine.

Describing apps using Kubernetes YAML

All containers in Kubernetes are scheduled as pods, which are groups of co-located containers that share some resources. Furthermore, in a realistic application we almost never create individual pods; instead, most of our workloads are scheduled as deployments, which are scalable groups of pods maintained automatically by Kubernetes. Lastly, all Kubernetes objects can and should be described in manifests called Kubernetes YAML files. These YAML files describe all the components and configurations of your Kubernetes app, and can be used to easily create and destroy your app in any Kubernetes environment.

  1. You already wrote a very basic Kubernetes YAML file in the Orchestration overview part of this tutorial. Now, let’s write a slightly more sophisticated YAML file to run and manage our bulletin board. Place the following in a file called bb.yaml:

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: bb-demo
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          bb: web
      template:
        metadata:
          labels:
            bb: web
        spec:
          containers:
          - name: bb-site
            image: getting-started
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: bb-entrypoint
      namespace: default
    spec:
      type: NodePort
      selector:
        bb: web
      ports:
      - port: 3000
        targetPort: 3000
        nodePort: 30001

    In this Kubernetes YAML file, we have two objects, separated by the ---:

    • A Deployment, describing a scalable group of identical pods. In this case, you’ll get just one replica, or copy of your pod, and that pod (which is described under the template: key) has just one container in it, based on the getting-started image referenced in the manifest above.
    • A NodePort service, which will route traffic from port 30001 on your host to port 3000 inside the pods it routes to, allowing you to reach your bulletin board from the network.

    Also, notice that while Kubernetes YAML can appear long and complicated at first, it almost always follows the same pattern:

    • The apiVersion, which indicates the Kubernetes API that parses this object
    • The kind indicating what sort of object this is
    • Some metadata applying things like names to your objects
    • The spec specifying all the parameters and configurations of your object.
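
    Before creating anything, you can sanity-check a manifest such as this one with a client-side dry run; a quick sketch (the --dry-run=client flag is available in recent kubectl releases):

    $ kubectl apply --dry-run=client -f bb.yaml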

Deploy and check your application

  1. In a terminal, navigate to where you created bb.yaml and deploy your application to Kubernetes:

    $ kubectl apply -f bb.yaml

    You should see output that looks like the following, indicating your Kubernetes objects were created successfully:

    deployment.apps/bb-demo created
    service/bb-entrypoint created

  2. Make sure everything worked by listing your deployments:

    $ kubectl get deployments

    If all is well, your deployment should be listed as follows:

    NAME      READY   UP-TO-DATE   AVAILABLE   AGE
    bb-demo   1/1     1            1           40s

    This indicates that the one pod you asked for in your YAML is up and running. Do the same check for your services:

    $ kubectl get services

    NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
    bb-entrypoint   NodePort    10.106.145.116   <none>        3000:30001/TCP   53s
    kubernetes      ClusterIP   10.96.0.1        <none>        443/TCP          138d

    In addition to the default kubernetes service, we see our bb-entrypoint service, accepting traffic on port 30001/TCP.

  3. Open a browser and visit your bulletin board at localhost:30001; you should see your bulletin board, the same as when we ran it as a stand-alone container in Part 2 of the Quickstart tutorial.

  4. Once satisfied, tear down your application:

    $ kubectl delete -f bb.yaml

Conclusion

At this point, we have successfully used Docker Desktop to deploy our application to a fully-featured Kubernetes environment on our development machine. We haven’t done much with Kubernetes yet, but the door is now open; you can begin adding other components to your app and taking advantage of all the features and power of Kubernetes, right on your own machine.

In addition to deploying to Kubernetes, we have also described our application as a Kubernetes YAML file. This simple text file contains everything we need to create our application in a running state. We can check it into version control and share it with our colleagues, allowing us to distribute our applications to other clusters (like the testing and production clusters that probably come after our development environments) easily.
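
For example, assuming you have a kubectl context configured for another cluster (the context name below is hypothetical), the very same file can be applied there:

$ kubectl --context staging-cluster apply -f bb.yaml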

Kubernetes references

Further documentation for all new Kubernetes objects used in this article are available here:

diff --git a/devdocs/docker/get-started%2Forchestration%2Findex.html b/devdocs/docker/get-started%2Forchestration%2Findex.html new file mode 100644 index 00000000..4fbed048 --- /dev/null +++ b/devdocs/docker/get-started%2Forchestration%2Findex.html @@ -0,0 +1,88 @@ +

Orchestration


The portability and reproducibility of a containerized process provides an opportunity to move and scale our containerized applications across clouds and datacenters. Containers effectively guarantee that those applications run the same way anywhere, allowing us to quickly and easily take advantage of all these environments. Additionally, as we scale our applications up, we need some tooling to help automate the maintenance of those applications, enable the replacement of failed containers automatically, and manage the rollout of updates and reconfigurations of those containers during their lifecycle.

Tools to manage, scale, and maintain containerized applications are called orchestrators, and the most common examples of these are Kubernetes and Docker Swarm. Development environment deployments of both of these orchestrators are provided by Docker Desktop, which we’ll use throughout this guide to create our first orchestrated, containerized application.

The advanced modules teach you how to:

  1. Set up and use a Kubernetes environment on your development machine
  2. Set up and use a Swarm environment on your development machine

Enable Kubernetes

Docker Desktop will set up Kubernetes for you quickly and easily. Follow the setup and validation instructions appropriate for your operating system:

Mac

  1. After installing Docker Desktop, you should see a Docker icon in your menu bar. Click on it, and navigate to Preferences > Kubernetes.

  2. Check the checkbox labeled Enable Kubernetes, and click Apply & Restart. Docker Desktop will automatically set up Kubernetes for you. You’ll know that Kubernetes has been successfully enabled when you see a green light beside ‘Kubernetes running’ in the Preferences menu.

  3. In order to confirm that Kubernetes is up and running, create a text file called pod.yaml with the following content:

    apiVersion: v1
    kind: Pod
    metadata:
      name: demo
    spec:
      containers:
      - name: testpod
        image: alpine:latest
        command: ["ping", "8.8.8.8"]

    This describes a pod with a single container, isolating a simple ping to 8.8.8.8.

  4. In a terminal, navigate to where you created pod.yaml and create your pod:

    $ kubectl apply -f pod.yaml

  5. Check that your pod is up and running:

    $ kubectl get pods

    You should see something like:

    NAME      READY     STATUS    RESTARTS   AGE
    demo      1/1       Running   0          4s

  6. Check that you get the logs you’d expect for a ping process:

    $ kubectl logs demo

    You should see the output of a healthy ping process:

    PING 8.8.8.8 (8.8.8.8): 56 data bytes
    64 bytes from 8.8.8.8: seq=0 ttl=37 time=21.393 ms
    64 bytes from 8.8.8.8: seq=1 ttl=37 time=15.320 ms
    64 bytes from 8.8.8.8: seq=2 ttl=37 time=11.111 ms
    ...

  7. Finally, tear down your test pod:

    $ kubectl delete -f pod.yaml

Windows

  1. After installing Docker Desktop, you should see a Docker icon in your system tray. Right-click on it, and navigate to Settings > Kubernetes.

  2. Check the checkbox labeled Enable Kubernetes, and click Apply & Restart. Docker Desktop will automatically set up Kubernetes for you. You’ll know that Kubernetes has been successfully enabled when you see a green light beside ‘Kubernetes running’ in the Settings menu.

  3. In order to confirm that Kubernetes is up and running, create a text file called pod.yaml with the following content:

    apiVersion: v1
    kind: Pod
    metadata:
      name: demo
    spec:
      containers:
      - name: testpod
        image: alpine:latest
        command: ["ping", "8.8.8.8"]

    This describes a pod with a single container, isolating a simple ping to 8.8.8.8.

  4. In PowerShell, navigate to where you created pod.yaml and create your pod:

    $ kubectl apply -f pod.yaml

  5. Check that your pod is up and running:

    $ kubectl get pods

    You should see something like:

    NAME      READY     STATUS    RESTARTS   AGE
    demo      1/1       Running   0          4s

  6. Check that you get the logs you’d expect for a ping process:

    $ kubectl logs demo

    You should see the output of a healthy ping process:

    PING 8.8.8.8 (8.8.8.8): 56 data bytes
    64 bytes from 8.8.8.8: seq=0 ttl=37 time=21.393 ms
    64 bytes from 8.8.8.8: seq=1 ttl=37 time=15.320 ms
    64 bytes from 8.8.8.8: seq=2 ttl=37 time=11.111 ms
    ...

  7. Finally, tear down your test pod:

    $ kubectl delete -f pod.yaml

Enable Docker Swarm

Docker Desktop runs primarily on Docker Engine, which has everything you need to run a Swarm built in. Follow the setup and validation instructions appropriate for your operating system:
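
If you are not sure whether Swarm mode is already active on your Engine, you can check first; a quick sketch (it reports inactive until you run docker swarm init):

$ docker info --format '{{.Swarm.LocalNodeState}}'
inactive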

Mac

  1. Open a terminal, and initialize Docker Swarm mode:

    $ docker swarm init

    If all goes well, you should see a message similar to the following:

    Swarm initialized: current node (tjjggogqpnpj2phbfbz8jd5oq) is now a manager.

    To add a worker to this swarm, run the following command:

        docker swarm join --token SWMTKN-1-3e0hh0jd5t4yjg209f4g5qpowbsczfahv2dea9a1ay2l8787cf-2h4ly330d0j917ocvzw30j5x9 192.168.65.3:2377

    To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

  2. Run a simple Docker service that uses an alpine-based filesystem, and isolates a ping to 8.8.8.8:

    $ docker service create --name demo alpine:latest ping 8.8.8.8

  3. Check that your service created one running container:

    $ docker service ps demo

    You should see something like:

    ID                  NAME                IMAGE               NODE                DESIRED STATE       CURRENT STATE           ERROR               PORTS
    463j2s3y4b5o        demo.1              alpine:latest       docker-desktop      Running             Running 8 seconds ago

  4. Check that you get the logs you’d expect for a ping process:

    $ docker service logs demo

    You should see the output of a healthy ping process:

    demo.1.463j2s3y4b5o@docker-desktop    | PING 8.8.8.8 (8.8.8.8): 56 data bytes
    demo.1.463j2s3y4b5o@docker-desktop    | 64 bytes from 8.8.8.8: seq=0 ttl=37 time=13.005 ms
    demo.1.463j2s3y4b5o@docker-desktop    | 64 bytes from 8.8.8.8: seq=1 ttl=37 time=13.847 ms
    demo.1.463j2s3y4b5o@docker-desktop    | 64 bytes from 8.8.8.8: seq=2 ttl=37 time=41.296 ms
    ...

  5. Finally, tear down your test service:

    $ docker service rm demo

Windows

  1. Open a PowerShell window, and initialize Docker Swarm mode:

    $ docker swarm init

    If all goes well, you should see a message similar to the following:

    Swarm initialized: current node (tjjggogqpnpj2phbfbz8jd5oq) is now a manager.

    To add a worker to this swarm, run the following command:

        docker swarm join --token SWMTKN-1-3e0hh0jd5t4yjg209f4g5qpowbsczfahv2dea9a1ay2l8787cf-2h4ly330d0j917ocvzw30j5x9 192.168.65.3:2377

    To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

  2. Run a simple Docker service that uses an alpine-based filesystem, and isolates a ping to 8.8.8.8:

    $ docker service create --name demo alpine:latest ping 8.8.8.8

  3. Check that your service created one running container:

    $ docker service ps demo

    You should see something like:

    ID                  NAME                IMAGE               NODE                DESIRED STATE       CURRENT STATE           ERROR               PORTS
    463j2s3y4b5o        demo.1              alpine:latest       docker-desktop      Running             Running 8 seconds ago

  4. Check that you get the logs you’d expect for a ping process:

    $ docker service logs demo

    You should see the output of a healthy ping process:

    demo.1.463j2s3y4b5o@docker-desktop    | PING 8.8.8.8 (8.8.8.8): 56 data bytes
    demo.1.463j2s3y4b5o@docker-desktop    | 64 bytes from 8.8.8.8: seq=0 ttl=37 time=13.005 ms
    demo.1.463j2s3y4b5o@docker-desktop    | 64 bytes from 8.8.8.8: seq=1 ttl=37 time=13.847 ms
    demo.1.463j2s3y4b5o@docker-desktop    | 64 bytes from 8.8.8.8: seq=2 ttl=37 time=41.296 ms
    ...

  5. Finally, tear down your test service:

    $ docker service rm demo

Conclusion

At this point, you’ve confirmed that you can run simple containerized workloads in Kubernetes and Swarm. The next step will be to write the Kubernetes YAML that describes how to run and manage these containers on Kubernetes.

On to deploying to Kubernetes >>

To learn how to write the stack file to help you run and manage containers on Swarm, see Deploying to Swarm.

CLI references

Further documentation for all CLI commands used in this article are available here:

diff --git a/devdocs/docker/get-started%2Foverview%2Findex.html b/devdocs/docker/get-started%2Foverview%2Findex.html new file mode 100644 index 00000000..cdfd744a --- /dev/null +++ b/devdocs/docker/get-started%2Foverview%2Findex.html @@ -0,0 +1,11 @@ +

Docker overview


Docker is an open platform for developing, shipping, and running applications. Docker enables you to separate your applications from your infrastructure so you can deliver software quickly. With Docker, you can manage your infrastructure in the same ways you manage your applications. By taking advantage of Docker’s methodologies for shipping, testing, and deploying code quickly, you can significantly reduce the delay between writing code and running it in production.

The Docker platform

Docker provides the ability to package and run an application in a loosely isolated environment called a container. The isolation and security allows you to run many containers simultaneously on a given host. Containers are lightweight and contain everything needed to run the application, so you do not need to rely on what is currently installed on the host. You can easily share containers while you work, and be sure that everyone you share with gets the same container that works in the same way.

Docker provides tooling and a platform to manage the lifecycle of your containers:

What can I use Docker for?

Fast, consistent delivery of your applications

Docker streamlines the development lifecycle by allowing developers to work in standardized environments using local containers which provide your applications and services. Containers are great for continuous integration and continuous delivery (CI/CD) workflows.

Consider the following example scenario:

Responsive deployment and scaling

Docker’s container-based platform allows for highly portable workloads. Docker containers can run on a developer’s local laptop, on physical or virtual machines in a data center, on cloud providers, or in a mixture of environments.

Docker’s portability and lightweight nature also make it easy to dynamically manage workloads, scaling up or tearing down applications and services as business needs dictate, in near real time.

Running more workloads on the same hardware

Docker is lightweight and fast. It provides a viable, cost-effective alternative to hypervisor-based virtual machines, so you can use more of your compute capacity to achieve your business goals. Docker is perfect for high density environments and for small and medium deployments where you need to do more with fewer resources.

Docker architecture

Docker uses a client-server architecture. The Docker client talks to the Docker daemon, which does the heavy lifting of building, running, and distributing your Docker containers. The Docker client and daemon can run on the same system, or you can connect a Docker client to a remote Docker daemon. The Docker client and daemon communicate using a REST API, over UNIX sockets or a network interface. Another Docker client is Docker Compose, which lets you work with applications consisting of a set of containers.

Docker Architecture Diagram
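
To make the client/daemon split concrete, you can talk to the daemon’s REST API directly over its UNIX socket; a rough sketch (the API version in the path may differ on your installation):

$ curl --unix-socket /var/run/docker.sock http://localhost/v1.41/containers/json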

The Docker daemon

The Docker daemon (dockerd) listens for Docker API requests and manages Docker objects such as images, containers, networks, and volumes. A daemon can also communicate with other daemons to manage Docker services.

The Docker client

The Docker client (docker) is the primary way that many Docker users interact with Docker. When you use commands such as docker run, the client sends these commands to dockerd, which carries them out. The docker command uses the Docker API. The Docker client can communicate with more than one daemon.
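
For instance, the same client binary can be pointed at a remote daemon over SSH; a sketch with a hypothetical host:

$ docker -H ssh://user@remote-host ps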

Docker Desktop

Docker Desktop is an easy-to-install application for your Mac or Windows environment that enables you to build and share containerized applications and microservices. Docker Desktop includes the Docker daemon (dockerd), the Docker client (docker), Docker Compose, Docker Content Trust, Kubernetes, and Credential Helper. For more information, see Docker Desktop.

Docker registries

A Docker registry stores Docker images. Docker Hub is a public registry that anyone can use, and Docker is configured to look for images on Docker Hub by default. You can even run your own private registry.

When you use the docker pull or docker run commands, the required images are pulled from your configured registry. When you use the docker push command, your image is pushed to your configured registry.

Docker objects

When you use Docker, you are creating and using images, containers, networks, volumes, plugins, and other objects. This section is a brief overview of some of those objects.

Images

An image is a read-only template with instructions for creating a Docker container. Often, an image is based on another image, with some additional customization. For example, you may build an image which is based on the ubuntu image, but installs the Apache web server and your application, as well as the configuration details needed to make your application run.

You might create your own images or you might only use those created by others and published in a registry. To build your own image, you create a Dockerfile with a simple syntax for defining the steps needed to create the image and run it. Each instruction in a Dockerfile creates a layer in the image. When you change the Dockerfile and rebuild the image, only those layers which have changed are rebuilt. This is part of what makes images so lightweight, small, and fast, when compared to other virtualization technologies.
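
You can see those layers for yourself with docker image history; a quick sketch using the ubuntu image (pull it first if it is not already local):

$ docker pull ubuntu
$ docker image history ubuntu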

Containers

A container is a runnable instance of an image. You can create, start, stop, move, or delete a container using the Docker API or CLI. You can connect a container to one or more networks, attach storage to it, or even create a new image based on its current state.

By default, a container is relatively well isolated from other containers and its host machine. You can control how isolated a container’s network, storage, or other underlying subsystems are from other containers or from the host machine.

A container is defined by its image as well as any configuration options you provide to it when you create or start it. When a container is removed, any changes to its state that are not stored in persistent storage disappear.

Example docker run command

The following command runs an ubuntu container, attaches interactively to your local command-line session, and runs /bin/bash.

$ docker run -i -t ubuntu /bin/bash

When you run this command, the following happens (assuming you are using the default registry configuration):

  1. If you do not have the ubuntu image locally, Docker pulls it from your configured registry, as though you had run docker pull ubuntu manually.

  2. Docker creates a new container, as though you had run a docker container create command manually.

  3. Docker allocates a read-write filesystem to the container, as its final layer. This allows a running container to create or modify files and directories in its local filesystem.

  4. Docker creates a network interface to connect the container to the default network, since you did not specify any networking options. This includes assigning an IP address to the container. By default, containers can connect to external networks using the host machine’s network connection.

  5. Docker starts the container and executes /bin/bash. Because the container is running interactively and attached to your terminal (due to the -i and -t flags), you can provide input using your keyboard while the output is logged to your terminal.

  6. When you type exit to terminate the /bin/bash command, the container stops but is not removed. You can start it again or remove it.
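
    As a small illustration (the container ID below is a placeholder), listing, restarting, and removing that stopped container looks like this:

    $ docker ps -a                     # the exited ubuntu container is still listed here
    $ docker start -ai <container-id>  # start it again, attached and interactive
    $ docker rm <container-id>         # or remove it for good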

The underlying technology

Docker is written in the Go programming language and takes advantage of several features of the Linux kernel to deliver its functionality. Docker uses a technology called namespaces to provide the isolated workspace called the container. When you run a container, Docker creates a set of namespaces for that container.

These namespaces provide a layer of isolation. Each aspect of a container runs in a separate namespace and its access is limited to that namespace.

Next steps

diff --git a/devdocs/docker/get-started%2Fresources%2Findex.html b/devdocs/docker/get-started%2Fresources%2Findex.html new file mode 100644 index 00000000..f0c09797 --- /dev/null +++ b/devdocs/docker/get-started%2Fresources%2Findex.html @@ -0,0 +1,24 @@ +

Educational resources


Docker and the broader community of Docker experts have put together many different ways to get further training and hands-on experience with Docker. Expand your understanding of Docker and Kubernetes with these additional free and paid resources.

Hosted labs

These self-paced and hands-on workshops use a free, hosted environment (Play with Kubernetes) that does not require any installation. Follow along and learn more about Kubernetes.

Self-guided tutorials

Created by experts in the Docker community, these free tutorials provide guided step-by-step workflows for working with the Docker platform.

Books

If books are your preferred learning style, check out the titles written by Docker Captains. Docker Captain is a distinction that Docker awards to select members of the community who are both experts in their field and committed to sharing their Docker knowledge with others.

Self-Paced online learning

A number of Docker Captains have also created video courses on Docker and Kubernetes.

* Many of the courses are fee-based

diff --git a/devdocs/docker/get-started%2Fswarm-deploy%2Findex.html b/devdocs/docker/get-started%2Fswarm-deploy%2Findex.html new file mode 100644 index 00000000..7455015f --- /dev/null +++ b/devdocs/docker/get-started%2Fswarm-deploy%2Findex.html @@ -0,0 +1,24 @@ +

Deploy to Swarm


Prerequisites

Introduction

Now that we’ve demonstrated that the individual components of our application run as stand-alone containers and shown how to deploy it using Kubernetes, let’s look at how to arrange for them to be managed by Docker Swarm. Swarm provides many tools for scaling, networking, securing and maintaining your containerized applications, above and beyond the abilities of containers themselves.

In order to validate that our containerized application works well on Swarm, we’ll use Docker Desktop’s built in Swarm environment right on our development machine to deploy our application, before handing it off to run on a full Swarm cluster in production. The Swarm environment created by Docker Desktop is fully featured, meaning it has all the Swarm features your app will enjoy on a real cluster, accessible from the convenience of your development machine.

Describe apps using stack files

Swarm never creates individual containers like we did in the previous step of this tutorial. Instead, all Swarm workloads are scheduled as services, which are scalable groups of containers with added networking features maintained automatically by Swarm. Furthermore, all Swarm objects can and should be described in manifests called stack files. These YAML files describe all the components and configurations of your Swarm app, and can be used to easily create and destroy your app in any Swarm environment.

Let’s write a simple stack file to run and manage our bulletin board. Place the following in a file called bb-stack.yaml:

version: '3.7'

services:
  bb-app:
    image: bulletinboard:1.0
    ports:
      - "8000:8080"

In this Swarm YAML file, we have just one object: a service, describing a scalable group of identical containers. In this case, you’ll get just one container (the default), and that container will be based on your bulletinboard:1.0 image created in Part 2 of the Quickstart tutorial. In addition, we’ve asked Swarm to forward all traffic arriving at port 8000 on our development machine to port 8080 inside our bulletin board container.

Kubernetes Services and Swarm Services are very different! Despite the similar name, the two orchestrators mean very different things by the term ‘service’. In Swarm, a service provides both scheduling and networking facilities, creating containers and providing tools for routing traffic to them. In Kubernetes, scheduling and networking are handled separately: deployments (or other controllers) handle the scheduling of containers as pods, while services are responsible only for adding networking features to those pods.

Deploy and check your application

  1. Deploy your application to Swarm:

    $ docker stack deploy -c bb-stack.yaml demo

    If all goes well, Swarm will report creating all your stack objects with no complaints:

    Creating network demo_default
    Creating service demo_bb-app

    Notice that in addition to your service, Swarm also creates a Docker network by default to isolate the containers deployed as part of your stack.

  2. Make sure everything worked by listing your service:

    $ docker service ls

    If all has gone well, your service will report with 1/1 of its replicas created:

    ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
    il7elwunymbs        demo_bb-app         replicated          1/1                 bulletinboard:1.0   *:8000->8080/tcp

    This indicates that the one container you asked for as part of your service is up and running. Also, we see that port 8000 on your development machine is getting forwarded to port 8080 in your bulletin board container.

  3. Open a browser and visit your bulletin board at localhost:8000; you should see your bulletin board, the same as when we ran it as a stand-alone container in Part 2 of the Quickstart tutorial.

  4. Once satisfied, tear down your application:

    $ docker stack rm demo

Conclusion

At this point, we have successfully used Docker Desktop to deploy our application to a fully-featured Swarm environment on our development machine. We haven’t done much with Swarm yet, but the door is now open: you can begin adding other components to your app and taking advantage of all the features and power of Swarm, right on your own machine.

In addition to deploying to Swarm, we have also described our application as a stack file. This simple text file contains everything we need to create our application in a running state; we can check it into version control and share it with our colleagues, allowing us to distribute our applications to other clusters (like the testing and production clusters that probably come after our development environments) easily.
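
For example, assuming you have a Docker context set up for another Swarm (the context name below is hypothetical), the same stack file can be deployed there:

$ docker --context production-swarm stack deploy -c bb-stack.yaml demo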

Swarm and CLI references

Further documentation for all new Swarm objects and CLI commands used in this article are available here:

diff --git a/devdocs/docker/index b/devdocs/docker/index new file mode 100644 index 00000000..e6d51024 --- /dev/null +++ b/devdocs/docker/index @@ -0,0 +1 @@ +((pages . ["index" "get-started/overview/index" "get-started/index" "engine/reference/builder/index" "compose/compose-file/index" "get-started/04_sharing_app/index" "get-started/02_our_app/index" "get-started/swarm-deploy/index" "engine/install/ubuntu/index" "engine/reference/commandline/run/index" "engine/reference/commandline/dockerd/index" "engine/reference/commandline/build/index" "compose/faq/index" "compose/install/index" "compose/gettingstarted/index" "compose/environment-variables/index" "compose/networking/index" "engine/security/index" "get-started/resources/index" "machine/index" "engine/index" "compose/samples-for-compose/index" "engine/reference/commandline/container/index" "compose/compose-file/deploy/index" "compose/index" "compose/gpu-support/index" "compose/reference/index" "engine/swarm/index" "engine/install/centos/index" "engine/reference/run/index" "engine/install/linux-postinstall/index" "engine/security/rootless/index" "engine/reference/commandline/image/index" "engine/scan/index" "compose/compose-file/build/index" "get-started/orchestration/index" "engine/swarm/stack-deploy/index" "engine/reference/commandline/stack/index" "engine/reference/commandline/service/index" "engine/reference/commandline/commit/index" "engine/reference/commandline/version/index" "compose/compose-file/compose-versioning/index" "engine/swarm/how-swarm-mode-works/services/index" "engine/api/index" "engine/security/protect-access/index" "engine/reference/commandline/tag/index" "compose/startup-order/index" "engine/install/binaries/index" "engine/reference/commandline/service_create/index" "engine/extend/plugins_authorization/index" "engine/security/userns-remap/index" "compose/reference/stop/index" "compose/reference/envvars/index" "engine/install/index" "compose/env-file/index" "compose/reference/config/index" "compose/compose-file/compose-file-v2/index" "engine/reference/commandline/network_create/index" "engine/extend/plugins_network/index" "compose/compose-file/compose-file-v3/index" "engine/security/trust/index" "engine/security/seccomp/index" "engine/security/apparmor/index" "engine/reference/commandline/cli/index" "engine/release-notes/index" "engine/deprecated/index" "engine/reference/commandline/container_attach/index" "engine/reference/commandline/container_commit/index" "engine/reference/commandline/container_cp/index" "engine/reference/commandline/container_create/index" "engine/reference/commandline/container_diff/index" "engine/reference/commandline/container_exec/index" "engine/reference/commandline/container_export/index" "engine/reference/commandline/container_inspect/index" "engine/reference/commandline/container_kill/index" "engine/reference/commandline/container_logs/index" "engine/reference/commandline/container_ls/index" "engine/reference/commandline/container_pause/index" "engine/reference/commandline/container_port/index" "engine/reference/commandline/container_rename/index" "engine/reference/commandline/container_restart/index" "engine/reference/commandline/container_rm/index" "engine/reference/commandline/container_start/index" "engine/reference/commandline/container_stats/index" "compose/profiles/index" "compose/production/index" "engine/reference/commandline/container_prune/index" "engine/reference/commandline/container_run/index" "engine/reference/commandline/container_stop/index" 
"engine/reference/commandline/container_top/index" "compose/extends/index" "compose/reference/pull/index" "engine/reference/commandline/container_unpause/index" "engine/reference/commandline/container_update/index" "engine/reference/commandline/container_wait/index" "engine/reference/commandline/ps/index" "compose/cli-command-compatibility/index" "engine/swarm/key-concepts/index" "engine/swarm/swarm-tutorial/index" "engine/reference/commandline/swarm_init/index" "engine/reference/commandline/service_ls/index" "engine/reference/commandline/service_rm/index" "engine/reference/commandline/swarm_join/index" "engine/reference/commandline/service_inspect/index" "engine/reference/commandline/service_scale/index" "engine/reference/commandline/service_ps/index" "engine/reference/commandline/attach/index" "engine/reference/commandline/inspect/index" "engine/reference/commandline/image_build/index" "engine/reference/commandline/image_history/index" "engine/reference/commandline/image_load/index" "engine/reference/commandline/image_ls/index" "engine/reference/commandline/service_update/index" "engine/reference/commandline/image_push/index" "engine/reference/commandline/image_import/index" "engine/reference/commandline/image_tag/index" "engine/reference/commandline/image_save/index" "engine/context/working-with-contexts/index" "engine/reference/commandline/image_inspect/index" "engine/reference/commandline/image_pull/index" "get-started/kube-deploy/index" "engine/reference/commandline/stack_deploy/index" "engine/reference/commandline/stack_ps/index" "engine/security/certificates/index" "engine/reference/commandline/events/index" "engine/reference/commandline/image_prune/index" "engine/reference/commandline/image_rm/index" "engine/swarm/swarm-mode/index" "engine/reference/commandline/stack_ls/index" "engine/reference/commandline/stack_services/index" "engine/reference/commandline/service_logs/index" "engine/reference/commandline/service_rollback/index" "engine/swarm/how-swarm-mode-works/nodes/index" "engine/reference/commandline/stack_rm/index" "engine/reference/commandline/context/index" "engine/swarm/how-swarm-mode-works/pki/index" "engine/api/sdk/index" "engine/reference/commandline/secret_create/index" "engine/reference/commandline/config_create/index" "engine/swarm/swarm-tutorial/rolling-update/index" "engine/reference/commandline/volume_create/index" "engine/reference/commandline/node_update/index" "engine/swarm/ingress/index" "engine/extend/index" "engine/extend/plugin_api/index" "engine/install/debian/index" "engine/install/fedora/index" "engine/install/rhel/index" "engine/install/sles/index" "compose/reference/up/index" "engine/extend/plugins_volume/index" "engine/reference/commandline/network/index" "engine/reference/commandline/network_connect/index" "engine/reference/commandline/network_disconnect/index" "engine/extend/legacy_plugins/index" "engine/reference/commandline/network_inspect/index" "engine/reference/commandline/network_prune/index" "engine/security/trust/trust_sandbox/index" "engine/reference/commandline/stats/index" "engine/reference/commandline/login/index" "engine/reference/commandline/network_ls/index" "engine/reference/commandline/network_rm/index" "engine/security/trust/trust_key_mng/index" "engine/security/trust/trust_delegation/index" "engine/security/trust/trust_automation/index" "engine/reference/commandline/swarm/index" "engine/swarm/secrets/index" "engine/reference/commandline/secret_ls/index" "engine/swarm/configs/index" "engine/security/trust/deploying_notary/index" 
"engine/reference/commandline/plugin_ls/index" "engine/reference/commandline/node_ls/index" "engine/swarm/services/index" "engine/reference/commandline/config_ls/index" "engine/reference/commandline/images/index" "engine/swarm/swarm-tutorial/create-swarm/index" "engine/reference/commandline/swarm_join-token/index" "engine/reference/commandline/swarm_ca/index" "engine/reference/commandline/swarm_leave/index" "engine/reference/commandline/swarm_unlock/index" "engine/reference/commandline/swarm_unlock-key/index" "engine/reference/commandline/swarm_update/index" "engine/swarm/join-nodes/index" "engine/reference/commandline/node_promote/index" "engine/reference/commandline/node_demote/index" "engine/reference/commandline/context_create/index" "engine/reference/commandline/context_export/index" "engine/reference/commandline/context_import/index" "engine/reference/commandline/context_inspect/index" "engine/reference/commandline/context_ls/index" "engine/reference/commandline/context_rm/index" "engine/reference/commandline/volume_ls/index" "engine/reference/commandline/context_update/index" "engine/reference/commandline/context_use/index" "engine/reference/commandline/volume_rm/index" "engine/reference/commandline/secret_inspect/index" "engine/reference/commandline/config/index" "engine/reference/commandline/config_inspect/index" "engine/reference/commandline/volume_inspect/index" "engine/reference/commandline/volume_prune/index" "engine/api/sdk/examples/index" "engine/reference/commandline/secret_rm/index" "engine/reference/commandline/volume/index" "engine/reference/commandline/node/index" "engine/reference/commandline/node_rm/index" "engine/reference/commandline/plugin/index" "engine/reference/commandline/secret/index" "engine/reference/commandline/config_rm/index" "engine/swarm/swarm-tutorial/scale-service/index" "engine/swarm/swarm-tutorial/drain-node/index" "engine/reference/commandline/node_inspect/index" "engine/reference/commandline/node_ps/index" "engine/extend/config/index" "engine/reference/commandline/plugin_push/index" "engine/reference/commandline/docker/index" "engine/reference/commandline/plugin_install/index" "engine/reference/commandline/plugin_create/index" "engine/swarm/swarm-tutorial/add-nodes/index" "engine/reference/commandline/plugin_enable/index" "engine/reference/commandline/plugin_inspect/index" "engine/reference/commandline/plugin_rm/index" "engine/reference/commandline/info/index" "engine/reference/commandline/plugin_disable/index" "engine/reference/commandline/plugin_upgrade/index" "engine/reference/commandline/logs/index" "engine/reference/commandline/plugin_set/index" "engine/reference/commandline/exec/index" "engine/swarm/manage-nodes/index" "engine/reference/commandline/checkpoint/index" "engine/reference/commandline/diff/index" "engine/swarm/swarm-tutorial/deploy-service/index" "engine/reference/commandline/create/index" "engine/reference/commandline/export/index" "engine/reference/commandline/kill/index" "engine/reference/commandline/push/index" "engine/swarm/swarm-tutorial/delete-service/index" "engine/reference/commandline/builder/index" "engine/reference/commandline/cp/index" "engine/reference/commandline/history/index" "engine/reference/commandline/load/index" "engine/swarm/admin_guide/index" "engine/reference/commandline/import/index" "engine/reference/commandline/pause/index" "engine/reference/commandline/port/index" "engine/reference/commandline/save/index" "engine/reference/commandline/manifest/index" "engine/reference/commandline/restart/index" 
"engine/reference/commandline/rm/index" "engine/reference/commandline/logout/index" "engine/reference/commandline/pull/index" "engine/reference/commandline/rename/index" "engine/reference/commandline/start/index" "engine/reference/commandline/stop/index" "engine/reference/commandline/rmi/index" "engine/reference/commandline/system/index" "engine/reference/commandline/search/index" "engine/reference/commandline/unpause/index" "engine/reference/commandline/checkpoint_ls/index" "engine/reference/commandline/trust/index" "engine/reference/commandline/builder_prune/index" "engine/reference/commandline/update/index" "engine/reference/commandline/checkpoint_rm/index" "engine/reference/commandline/wait/index" "engine/reference/commandline/checkpoint_create/index" "engine/swarm/swarm-tutorial/inspect-service/index" "engine/reference/commandline/manifest_push/index" "engine/reference/commandline/manifest_inspect/index" "engine/reference/commandline/system_prune/index" "engine/reference/commandline/top/index" "engine/swarm/raft/index" "engine/reference/commandline/manifest_annotate/index" "engine/reference/commandline/builder_build/index" "engine/reference/commandline/manifest_create/index" "engine/reference/commandline/system_events/index" "engine/reference/commandline/system_info/index" "engine/reference/commandline/system_df/index" "engine/reference/commandline/trust_revoke/index" "engine/swarm/swarm_manager_locking/index" "engine/reference/commandline/trust_sign/index" "engine/reference/commandline/manifest_rm/index" "engine/reference/commandline/trust_inspect/index" "engine/reference/commandline/trust_key/index" "engine/reference/commandline/trust_signer/index" "engine/reference/commandline/trust_key_load/index" "engine/reference/commandline/trust_key_generate/index" "engine/reference/commandline/trust_signer_remove/index" "engine/reference/commandline/trust_signer_add/index"]) (entries . [((name . "Access authorization plugin") (path . "engine/extend/plugins_authorization/index") (type . "Engine: Extend")) ((name . "Add nodes to the swarm") (path . "engine/swarm/swarm-tutorial/add-nodes/index") (type . "Engine")) ((name . "Administer and maintain a swarm of Docker Engines") (path . "engine/swarm/admin_guide/index") (type . "Engine")) ((name . "AppArmor security profiles for Docker") (path . "engine/security/apparmor/index") (type . "Engine: Security")) ((name . "Apply rolling updates to a service") (path . "engine/swarm/swarm-tutorial/rolling-update/index") (type . "Engine")) ((name . "Automation with content trust") (path . "engine/security/trust/trust_automation/index") (type . "Engine: Security")) ((name . "Compose") (path . "compose/index") (type . "Compose")) ((name . "Compose CLI environment variables") (path . "compose/reference/envvars/index") (type . "Compose")) ((name . "Compose command compatibility with docker-compose") (path . "compose/cli-command-compatibility/index") (type . "Compose")) ((name . "Compose file build reference") (path . "compose/compose-file/build/index") (type . "Compose")) ((name . "Compose file deploy reference") (path . "compose/compose-file/deploy/index") (type . "Compose")) ((name . "Compose file version 2 reference") (path . "compose/compose-file/compose-file-v2/index") (type . "Compose")) ((name . "Compose file version 3 reference") (path . "compose/compose-file/compose-file-v3/index") (type . "Compose")) ((name . "Compose file versions and upgrading") (path . "compose/compose-file/compose-versioning/index") (type . "Compose")) ((name . 
"Compose specification") (path . "compose/compose-file/index") (type . "Compose")) ((name . "Content trust in Docker") (path . "engine/security/trust/index") (type . "Engine: Security")) ((name . "Control startup and shutdown order in Compose") (path . "compose/startup-order/index") (type . "Compose")) ((name . "Create a swarm") (path . "engine/swarm/swarm-tutorial/create-swarm/index") (type . "Engine")) ((name . "Declare default environment variables in file") (path . "compose/env-file/index") (type . "Compose")) ((name . "Delegations for content trust") (path . "engine/security/trust/trust_delegation/index") (type . "Engine: Security")) ((name . "Delete the service running on the swarm") (path . "engine/swarm/swarm-tutorial/delete-service/index") (type . "Engine")) ((name . "Deploy a service to the swarm") (path . "engine/swarm/swarm-tutorial/deploy-service/index") (type . "Engine")) ((name . "Deploy a stack to a swarm") (path . "engine/swarm/stack-deploy/index") (type . "Engine")) ((name . "Deploy Notary Server with Compose") (path . "engine/security/trust/deploying_notary/index") (type . "Engine: Security")) ((name . "Deploy services to a swarm") (path . "engine/swarm/services/index") (type . "Engine")) ((name . "Deploy to Kubernetes") (path . "get-started/kube-deploy/index") (type . "Get Started")) ((name . "Deploy to Swarm") (path . "get-started/swarm-deploy/index") (type . "Get Started")) ((name . "Deprecated Engine Features") (path . "engine/deprecated/index") (type . "Engine")) ((name . "Develop with Docker Engine API") (path . "engine/api/index") (type . "Engine")) ((name . "Develop with Docker Engine SDKs") (path . "engine/api/sdk/index") (type . "Engine")) ((name . "docker") (path . "engine/reference/commandline/docker/index") (type . "Engine: CLI")) ((name . "docker attach") (path . "engine/reference/commandline/attach/index") (type . "Engine: CLI")) ((name . "docker build") (path . "engine/reference/commandline/build/index") (type . "Engine: CLI")) ((name . "docker builder") (path . "engine/reference/commandline/builder/index") (type . "Engine: CLI")) ((name . "docker builder build") (path . "engine/reference/commandline/builder_build/index") (type . "Engine: CLI")) ((name . "docker builder prune") (path . "engine/reference/commandline/builder_prune/index") (type . "Engine: CLI")) ((name . "docker checkpoint") (path . "engine/reference/commandline/checkpoint/index") (type . "Engine: CLI")) ((name . "docker checkpoint create") (path . "engine/reference/commandline/checkpoint_create/index") (type . "Engine: CLI")) ((name . "docker checkpoint ls") (path . "engine/reference/commandline/checkpoint_ls/index") (type . "Engine: CLI")) ((name . "docker checkpoint rm") (path . "engine/reference/commandline/checkpoint_rm/index") (type . "Engine: CLI")) ((name . "docker commit") (path . "engine/reference/commandline/commit/index") (type . "Engine: CLI")) ((name . "docker config") (path . "engine/reference/commandline/config/index") (type . "Engine: CLI")) ((name . "docker config create") (path . "engine/reference/commandline/config_create/index") (type . "Engine: CLI")) ((name . "docker config inspect") (path . "engine/reference/commandline/config_inspect/index") (type . "Engine: CLI")) ((name . "docker config ls") (path . "engine/reference/commandline/config_ls/index") (type . "Engine: CLI")) ((name . "docker config rm") (path . "engine/reference/commandline/config_rm/index") (type . "Engine: CLI")) ((name . "docker container") (path . 
"engine/reference/commandline/container/index") (type . "Engine: CLI")) ((name . "docker container attach") (path . "engine/reference/commandline/container_attach/index") (type . "Engine: CLI")) ((name . "docker container commit") (path . "engine/reference/commandline/container_commit/index") (type . "Engine: CLI")) ((name . "docker container cp") (path . "engine/reference/commandline/container_cp/index") (type . "Engine: CLI")) ((name . "docker container create") (path . "engine/reference/commandline/container_create/index") (type . "Engine: CLI")) ((name . "docker container diff") (path . "engine/reference/commandline/container_diff/index") (type . "Engine: CLI")) ((name . "docker container exec") (path . "engine/reference/commandline/container_exec/index") (type . "Engine: CLI")) ((name . "docker container export") (path . "engine/reference/commandline/container_export/index") (type . "Engine: CLI")) ((name . "docker container inspect") (path . "engine/reference/commandline/container_inspect/index") (type . "Engine: CLI")) ((name . "docker container kill") (path . "engine/reference/commandline/container_kill/index") (type . "Engine: CLI")) ((name . "docker container logs") (path . "engine/reference/commandline/container_logs/index") (type . "Engine: CLI")) ((name . "docker container ls") (path . "engine/reference/commandline/container_ls/index") (type . "Engine: CLI")) ((name . "docker container pause") (path . "engine/reference/commandline/container_pause/index") (type . "Engine: CLI")) ((name . "docker container port") (path . "engine/reference/commandline/container_port/index") (type . "Engine: CLI")) ((name . "docker container prune") (path . "engine/reference/commandline/container_prune/index") (type . "Engine: CLI")) ((name . "docker container rename") (path . "engine/reference/commandline/container_rename/index") (type . "Engine: CLI")) ((name . "docker container restart") (path . "engine/reference/commandline/container_restart/index") (type . "Engine: CLI")) ((name . "docker container rm") (path . "engine/reference/commandline/container_rm/index") (type . "Engine: CLI")) ((name . "docker container run") (path . "engine/reference/commandline/container_run/index") (type . "Engine: CLI")) ((name . "docker container start") (path . "engine/reference/commandline/container_start/index") (type . "Engine: CLI")) ((name . "docker container stats") (path . "engine/reference/commandline/container_stats/index") (type . "Engine: CLI")) ((name . "docker container stop") (path . "engine/reference/commandline/container_stop/index") (type . "Engine: CLI")) ((name . "docker container top") (path . "engine/reference/commandline/container_top/index") (type . "Engine: CLI")) ((name . "docker container unpause") (path . "engine/reference/commandline/container_unpause/index") (type . "Engine: CLI")) ((name . "docker container update") (path . "engine/reference/commandline/container_update/index") (type . "Engine: CLI")) ((name . "docker container wait") (path . "engine/reference/commandline/container_wait/index") (type . "Engine: CLI")) ((name . "docker context") (path . "engine/reference/commandline/context/index") (type . "Engine: CLI")) ((name . "Docker Context") (path . "engine/context/working-with-contexts/index") (type . "Engine")) ((name . "docker context create") (path . "engine/reference/commandline/context_create/index") (type . "Engine: CLI")) ((name . "docker context export") (path . "engine/reference/commandline/context_export/index") (type . "Engine: CLI")) ((name . 
"docker context import") (path . "engine/reference/commandline/context_import/index") (type . "Engine: CLI")) ((name . "docker context inspect") (path . "engine/reference/commandline/context_inspect/index") (type . "Engine: CLI")) ((name . "docker context ls") (path . "engine/reference/commandline/context_ls/index") (type . "Engine: CLI")) ((name . "docker context rm") (path . "engine/reference/commandline/context_rm/index") (type . "Engine: CLI")) ((name . "docker context update") (path . "engine/reference/commandline/context_update/index") (type . "Engine: CLI")) ((name . "docker context use") (path . "engine/reference/commandline/context_use/index") (type . "Engine: CLI")) ((name . "docker cp") (path . "engine/reference/commandline/cp/index") (type . "Engine: CLI")) ((name . "docker create") (path . "engine/reference/commandline/create/index") (type . "Engine: CLI")) ((name . "docker diff") (path . "engine/reference/commandline/diff/index") (type . "Engine: CLI")) ((name . "Docker Engine managed plugin system") (path . "engine/extend/index") (type . "Engine: Extend")) ((name . "Docker Engine release notes") (path . "engine/release-notes/index") (type . "Engine")) ((name . "docker events") (path . "engine/reference/commandline/events/index") (type . "Engine: CLI")) ((name . "docker exec") (path . "engine/reference/commandline/exec/index") (type . "Engine: CLI")) ((name . "docker export") (path . "engine/reference/commandline/export/index") (type . "Engine: CLI")) ((name . "docker history") (path . "engine/reference/commandline/history/index") (type . "Engine: CLI")) ((name . "docker image") (path . "engine/reference/commandline/image/index") (type . "Engine: CLI")) ((name . "docker image build") (path . "engine/reference/commandline/image_build/index") (type . "Engine: CLI")) ((name . "docker image history") (path . "engine/reference/commandline/image_history/index") (type . "Engine: CLI")) ((name . "docker image import") (path . "engine/reference/commandline/image_import/index") (type . "Engine: CLI")) ((name . "docker image inspect") (path . "engine/reference/commandline/image_inspect/index") (type . "Engine: CLI")) ((name . "docker image load") (path . "engine/reference/commandline/image_load/index") (type . "Engine: CLI")) ((name . "docker image ls") (path . "engine/reference/commandline/image_ls/index") (type . "Engine: CLI")) ((name . "docker image prune") (path . "engine/reference/commandline/image_prune/index") (type . "Engine: CLI")) ((name . "docker image pull") (path . "engine/reference/commandline/image_pull/index") (type . "Engine: CLI")) ((name . "docker image push") (path . "engine/reference/commandline/image_push/index") (type . "Engine: CLI")) ((name . "docker image rm") (path . "engine/reference/commandline/image_rm/index") (type . "Engine: CLI")) ((name . "docker image save") (path . "engine/reference/commandline/image_save/index") (type . "Engine: CLI")) ((name . "docker image tag") (path . "engine/reference/commandline/image_tag/index") (type . "Engine: CLI")) ((name . "docker images") (path . "engine/reference/commandline/images/index") (type . "Engine: CLI")) ((name . "docker import") (path . "engine/reference/commandline/import/index") (type . "Engine: CLI")) ((name . "docker info") (path . "engine/reference/commandline/info/index") (type . "Engine: CLI")) ((name . "docker inspect") (path . "engine/reference/commandline/inspect/index") (type . "Engine: CLI")) ((name . "docker kill") (path . "engine/reference/commandline/kill/index") (type . 
"Engine: CLI")) ((name . "docker load") (path . "engine/reference/commandline/load/index") (type . "Engine: CLI")) ((name . "docker login") (path . "engine/reference/commandline/login/index") (type . "Engine: CLI")) ((name . "docker logout") (path . "engine/reference/commandline/logout/index") (type . "Engine: CLI")) ((name . "docker logs") (path . "engine/reference/commandline/logs/index") (type . "Engine: CLI")) ((name . "docker manifest") (path . "engine/reference/commandline/manifest/index") (type . "Engine: CLI")) ((name . "docker manifest annotate") (path . "engine/reference/commandline/manifest_annotate/index") (type . "Engine: CLI")) ((name . "docker manifest create") (path . "engine/reference/commandline/manifest_create/index") (type . "Engine: CLI")) ((name . "docker manifest inspect") (path . "engine/reference/commandline/manifest_inspect/index") (type . "Engine: CLI")) ((name . "docker manifest push") (path . "engine/reference/commandline/manifest_push/index") (type . "Engine: CLI")) ((name . "docker manifest rm") (path . "engine/reference/commandline/manifest_rm/index") (type . "Engine: CLI")) ((name . "docker network") (path . "engine/reference/commandline/network/index") (type . "Engine: CLI")) ((name . "docker network connect") (path . "engine/reference/commandline/network_connect/index") (type . "Engine: CLI")) ((name . "docker network create") (path . "engine/reference/commandline/network_create/index") (type . "Engine: CLI")) ((name . "docker network disconnect") (path . "engine/reference/commandline/network_disconnect/index") (type . "Engine: CLI")) ((name . "Docker network driver plugins") (path . "engine/extend/plugins_network/index") (type . "Engine: Extend")) ((name . "docker network inspect") (path . "engine/reference/commandline/network_inspect/index") (type . "Engine: CLI")) ((name . "docker network ls") (path . "engine/reference/commandline/network_ls/index") (type . "Engine: CLI")) ((name . "docker network prune") (path . "engine/reference/commandline/network_prune/index") (type . "Engine: CLI")) ((name . "docker network rm") (path . "engine/reference/commandline/network_rm/index") (type . "Engine: CLI")) ((name . "docker node") (path . "engine/reference/commandline/node/index") (type . "Engine: CLI")) ((name . "docker node demote") (path . "engine/reference/commandline/node_demote/index") (type . "Engine: CLI")) ((name . "docker node inspect") (path . "engine/reference/commandline/node_inspect/index") (type . "Engine: CLI")) ((name . "docker node ls") (path . "engine/reference/commandline/node_ls/index") (type . "Engine: CLI")) ((name . "docker node promote") (path . "engine/reference/commandline/node_promote/index") (type . "Engine: CLI")) ((name . "docker node ps") (path . "engine/reference/commandline/node_ps/index") (type . "Engine: CLI")) ((name . "docker node rm") (path . "engine/reference/commandline/node_rm/index") (type . "Engine: CLI")) ((name . "docker node update") (path . "engine/reference/commandline/node_update/index") (type . "Engine: CLI")) ((name . "Docker overview") (path . "get-started/overview/index") (type . "Get Started")) ((name . "docker pause") (path . "engine/reference/commandline/pause/index") (type . "Engine: CLI")) ((name . "docker plugin") (path . "engine/reference/commandline/plugin/index") (type . "Engine: CLI")) ((name . "Docker Plugin API") (path . "engine/extend/plugin_api/index") (type . "Engine: Extend")) ((name . "docker plugin create") (path . "engine/reference/commandline/plugin_create/index") (type . 
"Engine: CLI")) ((name . "docker plugin disable") (path . "engine/reference/commandline/plugin_disable/index") (type . "Engine: CLI")) ((name . "docker plugin enable") (path . "engine/reference/commandline/plugin_enable/index") (type . "Engine: CLI")) ((name . "docker plugin inspect") (path . "engine/reference/commandline/plugin_inspect/index") (type . "Engine: CLI")) ((name . "docker plugin install") (path . "engine/reference/commandline/plugin_install/index") (type . "Engine: CLI")) ((name . "docker plugin ls") (path . "engine/reference/commandline/plugin_ls/index") (type . "Engine: CLI")) ((name . "docker plugin push") (path . "engine/reference/commandline/plugin_push/index") (type . "Engine: CLI")) ((name . "docker plugin rm") (path . "engine/reference/commandline/plugin_rm/index") (type . "Engine: CLI")) ((name . "docker plugin set") (path . "engine/reference/commandline/plugin_set/index") (type . "Engine: CLI")) ((name . "docker plugin upgrade") (path . "engine/reference/commandline/plugin_upgrade/index") (type . "Engine: CLI")) ((name . "docker port") (path . "engine/reference/commandline/port/index") (type . "Engine: CLI")) ((name . "docker ps") (path . "engine/reference/commandline/ps/index") (type . "Engine: CLI")) ((name . "docker pull") (path . "engine/reference/commandline/pull/index") (type . "Engine: CLI")) ((name . "docker push") (path . "engine/reference/commandline/push/index") (type . "Engine: CLI")) ((name . "docker rename") (path . "engine/reference/commandline/rename/index") (type . "Engine: CLI")) ((name . "docker restart") (path . "engine/reference/commandline/restart/index") (type . "Engine: CLI")) ((name . "docker rm") (path . "engine/reference/commandline/rm/index") (type . "Engine: CLI")) ((name . "docker rmi") (path . "engine/reference/commandline/rmi/index") (type . "Engine: CLI")) ((name . "docker run") (path . "engine/reference/commandline/run/index") (type . "Engine: CLI")) ((name . "Docker run reference") (path . "engine/reference/run/index") (type . "Engine")) ((name . "docker save") (path . "engine/reference/commandline/save/index") (type . "Engine: CLI")) ((name . "docker search") (path . "engine/reference/commandline/search/index") (type . "Engine: CLI")) ((name . "docker secret") (path . "engine/reference/commandline/secret/index") (type . "Engine: CLI")) ((name . "docker secret create") (path . "engine/reference/commandline/secret_create/index") (type . "Engine: CLI")) ((name . "docker secret inspect") (path . "engine/reference/commandline/secret_inspect/index") (type . "Engine: CLI")) ((name . "docker secret ls") (path . "engine/reference/commandline/secret_ls/index") (type . "Engine: CLI")) ((name . "docker secret rm") (path . "engine/reference/commandline/secret_rm/index") (type . "Engine: CLI")) ((name . "Docker security") (path . "engine/security/index") (type . "Engine: Security")) ((name . "docker service") (path . "engine/reference/commandline/service/index") (type . "Engine: CLI")) ((name . "docker service create") (path . "engine/reference/commandline/service_create/index") (type . "Engine: CLI")) ((name . "docker service inspect") (path . "engine/reference/commandline/service_inspect/index") (type . "Engine: CLI")) ((name . "docker service logs") (path . "engine/reference/commandline/service_logs/index") (type . "Engine: CLI")) ((name . "docker service ls") (path . "engine/reference/commandline/service_ls/index") (type . "Engine: CLI")) ((name . "docker service ps") (path . "engine/reference/commandline/service_ps/index") (type . 
"Engine: CLI")) ((name . "docker service rm") (path . "engine/reference/commandline/service_rm/index") (type . "Engine: CLI")) ((name . "docker service rollback") (path . "engine/reference/commandline/service_rollback/index") (type . "Engine: CLI")) ((name . "docker service scale") (path . "engine/reference/commandline/service_scale/index") (type . "Engine: CLI")) ((name . "docker service update") (path . "engine/reference/commandline/service_update/index") (type . "Engine: CLI")) ((name . "docker stack") (path . "engine/reference/commandline/stack/index") (type . "Engine: CLI")) ((name . "docker stack deploy") (path . "engine/reference/commandline/stack_deploy/index") (type . "Engine: CLI")) ((name . "docker stack ls") (path . "engine/reference/commandline/stack_ls/index") (type . "Engine: CLI")) ((name . "docker stack ps") (path . "engine/reference/commandline/stack_ps/index") (type . "Engine: CLI")) ((name . "docker stack rm") (path . "engine/reference/commandline/stack_rm/index") (type . "Engine: CLI")) ((name . "docker stack services") (path . "engine/reference/commandline/stack_services/index") (type . "Engine: CLI")) ((name . "docker start") (path . "engine/reference/commandline/start/index") (type . "Engine: CLI")) ((name . "docker stats") (path . "engine/reference/commandline/stats/index") (type . "Engine: CLI")) ((name . "docker stop") (path . "engine/reference/commandline/stop/index") (type . "Engine: CLI")) ((name . "docker swarm") (path . "engine/reference/commandline/swarm/index") (type . "Engine: CLI")) ((name . "docker swarm ca") (path . "engine/reference/commandline/swarm_ca/index") (type . "Engine: CLI")) ((name . "docker swarm init") (path . "engine/reference/commandline/swarm_init/index") (type . "Engine: CLI")) ((name . "docker swarm join") (path . "engine/reference/commandline/swarm_join/index") (type . "Engine: CLI")) ((name . "docker swarm join-token") (path . "engine/reference/commandline/swarm_join-token/index") (type . "Engine: CLI")) ((name . "docker swarm leave") (path . "engine/reference/commandline/swarm_leave/index") (type . "Engine: CLI")) ((name . "docker swarm unlock") (path . "engine/reference/commandline/swarm_unlock/index") (type . "Engine: CLI")) ((name . "docker swarm unlock-key") (path . "engine/reference/commandline/swarm_unlock-key/index") (type . "Engine: CLI")) ((name . "docker swarm update") (path . "engine/reference/commandline/swarm_update/index") (type . "Engine: CLI")) ((name . "docker system") (path . "engine/reference/commandline/system/index") (type . "Engine: CLI")) ((name . "docker system df") (path . "engine/reference/commandline/system_df/index") (type . "Engine: CLI")) ((name . "docker system events") (path . "engine/reference/commandline/system_events/index") (type . "Engine: CLI")) ((name . "docker system info") (path . "engine/reference/commandline/system_info/index") (type . "Engine: CLI")) ((name . "docker system prune") (path . "engine/reference/commandline/system_prune/index") (type . "Engine: CLI")) ((name . "docker tag") (path . "engine/reference/commandline/tag/index") (type . "Engine: CLI")) ((name . "docker top") (path . "engine/reference/commandline/top/index") (type . "Engine: CLI")) ((name . "docker trust") (path . "engine/reference/commandline/trust/index") (type . "Engine: CLI")) ((name . "docker trust inspect") (path . "engine/reference/commandline/trust_inspect/index") (type . "Engine: CLI")) ((name . "docker trust key") (path . "engine/reference/commandline/trust_key/index") (type . "Engine: CLI")) ((name . 
"docker trust key generate") (path . "engine/reference/commandline/trust_key_generate/index") (type . "Engine: CLI")) ((name . "docker trust key load") (path . "engine/reference/commandline/trust_key_load/index") (type . "Engine: CLI")) ((name . "docker trust revoke") (path . "engine/reference/commandline/trust_revoke/index") (type . "Engine: CLI")) ((name . "docker trust sign") (path . "engine/reference/commandline/trust_sign/index") (type . "Engine: CLI")) ((name . "docker trust signer") (path . "engine/reference/commandline/trust_signer/index") (type . "Engine: CLI")) ((name . "docker trust signer add") (path . "engine/reference/commandline/trust_signer_add/index") (type . "Engine: CLI")) ((name . "docker trust signer remove") (path . "engine/reference/commandline/trust_signer_remove/index") (type . "Engine: CLI")) ((name . "docker unpause") (path . "engine/reference/commandline/unpause/index") (type . "Engine: CLI")) ((name . "docker update") (path . "engine/reference/commandline/update/index") (type . "Engine: CLI")) ((name . "docker version") (path . "engine/reference/commandline/version/index") (type . "Engine: CLI")) ((name . "docker volume") (path . "engine/reference/commandline/volume/index") (type . "Engine: CLI")) ((name . "docker volume create") (path . "engine/reference/commandline/volume_create/index") (type . "Engine: CLI")) ((name . "docker volume inspect") (path . "engine/reference/commandline/volume_inspect/index") (type . "Engine: CLI")) ((name . "docker volume ls") (path . "engine/reference/commandline/volume_ls/index") (type . "Engine: CLI")) ((name . "Docker volume plugins") (path . "engine/extend/plugins_volume/index") (type . "Engine: Extend")) ((name . "docker volume prune") (path . "engine/reference/commandline/volume_prune/index") (type . "Engine: CLI")) ((name . "docker volume rm") (path . "engine/reference/commandline/volume_rm/index") (type . "Engine: CLI")) ((name . "docker wait") (path . "engine/reference/commandline/wait/index") (type . "Engine: CLI")) ((name . "docker-compose config") (path . "compose/reference/config/index") (type . "Compose")) ((name . "docker-compose pull") (path . "compose/reference/pull/index") (type . "Compose")) ((name . "docker-compose stop") (path . "compose/reference/stop/index") (type . "Compose")) ((name . "docker-compose up") (path . "compose/reference/up/index") (type . "Compose")) ((name . "dockerd") (path . "engine/reference/commandline/dockerd/index") (type . "Engine: CLI")) ((name . "Dockerfile reference") (path . "engine/reference/builder/index") (type . "Engine")) ((name . "Drain a node on the swarm") (path . "engine/swarm/swarm-tutorial/drain-node/index") (type . "Engine")) ((name . "Educational resources") (path . "get-started/resources/index") (type . "Get Started")) ((name . "Enabling GPU access with Compose") (path . "compose/gpu-support/index") (type . "Compose")) ((name . "Engine") (path . "engine/index") (type . "Engine")) ((name . "Environment variables in Compose") (path . "compose/environment-variables/index") (type . "Compose")) ((name . "Examples using the Docker Engine SDKs and Docker API") (path . "engine/api/sdk/examples/index") (type . "Engine")) ((name . "Frequently asked questions") (path . "compose/faq/index") (type . "Compose")) ((name . "Get started with Docker Compose") (path . "compose/gettingstarted/index") (type . "Compose")) ((name . "Getting started with swarm mode") (path . "engine/swarm/swarm-tutorial/index") (type . "Engine")) ((name . "How nodes work") (path . 
"engine/swarm/how-swarm-mode-works/nodes/index") (type . "Engine")) ((name . "How services work") (path . "engine/swarm/how-swarm-mode-works/services/index") (type . "Engine")) ((name . "Inspect a service on the swarm") (path . "engine/swarm/swarm-tutorial/inspect-service/index") (type . "Engine")) ((name . "Install Docker Compose") (path . "compose/install/index") (type . "Compose")) ((name . "Install Docker Engine") (path . "engine/install/index") (type . "Engine")) ((name . "Install Docker Engine from binaries") (path . "engine/install/binaries/index") (type . "Engine")) ((name . "Install Docker Engine on CentOS") (path . "engine/install/centos/index") (type . "Engine")) ((name . "Install Docker Engine on Debian") (path . "engine/install/debian/index") (type . "Engine")) ((name . "Install Docker Engine on Fedora") (path . "engine/install/fedora/index") (type . "Engine")) ((name . "Install Docker Engine on RHEL") (path . "engine/install/rhel/index") (type . "Engine")) ((name . "Install Docker Engine on SLES") (path . "engine/install/sles/index") (type . "Engine")) ((name . "Install Docker Engine on Ubuntu") (path . "engine/install/ubuntu/index") (type . "Engine")) ((name . "Isolate containers with a user namespace") (path . "engine/security/userns-remap/index") (type . "Engine: Security")) ((name . "Join nodes to a swarm") (path . "engine/swarm/join-nodes/index") (type . "Engine")) ((name . "Lock your swarm to protect its encryption key") (path . "engine/swarm/swarm_manager_locking/index") (type . "Engine")) ((name . "Machine") (path . "machine/index") (type . "Machine")) ((name . "Manage keys for content trust") (path . "engine/security/trust/trust_key_mng/index") (type . "Engine: Security")) ((name . "Manage nodes in a swarm") (path . "engine/swarm/manage-nodes/index") (type . "Engine")) ((name . "Manage sensitive data with Docker secrets") (path . "engine/swarm/secrets/index") (type . "Engine")) ((name . "Manage swarm security with public key infrastructure (PKI)") (path . "engine/swarm/how-swarm-mode-works/pki/index") (type . "Engine")) ((name . "Networking in Compose") (path . "compose/networking/index") (type . "Compose")) ((name . "Orchestration") (path . "get-started/orchestration/index") (type . "Get Started")) ((name . "Orientation and setup") (path . "get-started/index") (type . "Get Started")) ((name . "Overview of docker-compose CLI") (path . "compose/reference/index") (type . "Compose")) ((name . "Play in a content trust sandbox") (path . "engine/security/trust/trust_sandbox/index") (type . "Engine: Security")) ((name . "Plugin Config Version 1 of Plugin V2") (path . "engine/extend/config/index") (type . "Engine: Extend")) ((name . "Post-installation steps for Linux") (path . "engine/install/linux-postinstall/index") (type . "Engine")) ((name . "Protect the Docker daemon socket") (path . "engine/security/protect-access/index") (type . "Engine: Security")) ((name . "Raft consensus in swarm mode") (path . "engine/swarm/raft/index") (type . "Engine")) ((name . "Run Docker Engine in swarm mode") (path . "engine/swarm/swarm-mode/index") (type . "Engine")) ((name . "Run the Docker daemon as a non-root user (Rootless mode)") (path . "engine/security/rootless/index") (type . "Engine: Security")) ((name . "Sample application") (path . "get-started/02_our_app/index") (type . "Get Started")) ((name . "Sample apps with Compose") (path . "compose/samples-for-compose/index") (type . "Compose")) ((name . "Scale the service in the swarm") (path . 
"engine/swarm/swarm-tutorial/scale-service/index") (type . "Engine")) ((name . "Seccomp security profiles for Docker") (path . "engine/security/seccomp/index") (type . "Engine: Security")) ((name . "Share Compose configurations between files and projects") (path . "compose/extends/index") (type . "Compose")) ((name . "Share the application") (path . "get-started/04_sharing_app/index") (type . "Get Started")) ((name . "Store configuration data using Docker Configs") (path . "engine/swarm/configs/index") (type . "Engine")) ((name . "Swarm mode key concepts") (path . "engine/swarm/key-concepts/index") (type . "Engine")) ((name . "Swarm mode overview") (path . "engine/swarm/index") (type . "Engine")) ((name . "Use Compose in production") (path . "compose/production/index") (type . "Compose")) ((name . "Use Docker Engine plugins") (path . "engine/extend/legacy_plugins/index") (type . "Engine: Extend")) ((name . "Use swarm mode routing mesh") (path . "engine/swarm/ingress/index") (type . "Engine")) ((name . "Use the Docker command line") (path . "engine/reference/commandline/cli/index") (type . "Engine: CLI")) ((name . "Using profiles with Compose") (path . "compose/profiles/index") (type . "Compose")) ((name . "Verify repository client with certificates") (path . "engine/security/certificates/index") (type . "Engine: Security")) ((name . "Vulnerability scanning for Docker local images") (path . "engine/scan/index") (type . "Engine"))]) (types . [((name . "Compose") (count . 26) (slug . "compose")) ((name . "Engine") (count . 44) (slug . "engine")) ((name . "Engine: CLI") (count . 187) (slug . "engine-cli")) ((name . "Engine: Extend") (count . 7) (slug . "engine-extend")) ((name . "Engine: Security") (count . 13) (slug . "engine-security")) ((name . "Get Started") (count . 8) (slug . "get-started")) ((name . "Machine") (count . 1) (slug . "machine"))])) \ No newline at end of file diff --git a/devdocs/docker/index.html b/devdocs/docker/index.html new file mode 100644 index 00000000..f9fbeddd --- /dev/null +++ b/devdocs/docker/index.html @@ -0,0 +1,6 @@ +

Docker Documentation

+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/ +

+
diff --git a/devdocs/docker/machine%2Findex.html b/devdocs/docker/machine%2Findex.html
new file mode 100644
index 00000000..9cfea67f
--- /dev/null
+++ b/devdocs/docker/machine%2Findex.html
@@ -0,0 +1,9 @@
+

Docker Machine

+

Deprecated

Docker Machine has been deprecated. Please use Docker Desktop instead. See Docker Desktop for Mac and Docker Desktop for Windows. You can also use other cloud provisioning tools.

The source code for Docker Machine has been archived. You can find the source code on GitHub.

+

docker, machine

+
+

+ © 2019 Docker, Inc.
Licensed under the Apache License, Version 2.0.
Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
Docker, Inc. and other parties may also have trademark rights in other terms used herein.
+ https://docs.docker.com/machine/ +

+
diff --git a/devdocs/docker/metadata b/devdocs/docker/metadata
new file mode 100644
index 00000000..8e80d2a2
--- /dev/null
+++ b/devdocs/docker/metadata
@@ -0,0 +1,4 @@
+(1 (name . "Docker") (slug . "docker") (type . "simple") (links (home . "https://docker.com/") (code . "https://github.com/docker/docker")) (version . "") (release . "20.10.16") (mtime . 1654206621) (db_size . 4328150) (attribution . "© 2019 Docker, Inc.
+ Licensed under the Apache License, Version 2.0.
+ Docker and the Docker logo are trademarks or registered trademarks of Docker, Inc. in the United States and/or other countries.
+ Docker, Inc. and other parties may also have trademark rights in other terms used herein."))
\ No newline at end of file
--
cgit v1.2.3