summaryrefslogtreecommitdiffstats
path: root/images/installer
diff options
context:
space:
mode:
Diffstat (limited to 'images/installer')
-rw-r--r--images/installer/Dockerfile62
-rw-r--r--images/installer/Dockerfile.rhel755
-rw-r--r--images/installer/README_CONTAINER_IMAGE.md (renamed from images/installer/system-container/README.md)27
-rw-r--r--images/installer/root/exports/config.json.template (renamed from images/installer/system-container/root/exports/config.json.template)0
-rw-r--r--images/installer/root/exports/manifest.json (renamed from images/installer/system-container/root/exports/manifest.json)2
-rw-r--r--images/installer/root/exports/service.template (renamed from images/installer/system-container/root/exports/service.template)0
-rw-r--r--images/installer/root/exports/tmpfiles.template (renamed from images/installer/system-container/root/exports/tmpfiles.template)0
-rwxr-xr-ximages/installer/root/usr/local/bin/entrypoint17
-rwxr-xr-ximages/installer/root/usr/local/bin/run46
-rwxr-xr-ximages/installer/root/usr/local/bin/run-system-container.sh (renamed from images/installer/system-container/root/usr/local/bin/run-system-container.sh)0
-rwxr-xr-ximages/installer/root/usr/local/bin/usage33
-rwxr-xr-ximages/installer/root/usr/local/bin/usage.ocp33
-rwxr-xr-ximages/installer/root/usr/local/bin/user_setup17
13 files changed, 224 insertions, 68 deletions
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index 915dfe377..d03f33a1d 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -1,10 +1,18 @@
-# Using playbook2image as a base
-# See https://github.com/openshift/playbook2image for details on the image
-# including documentation for the settings/env vars referenced below
-FROM registry.centos.org/openshift/playbook2image:latest
+FROM centos:7
MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
+USER root
+
+# install ansible and deps
+RUN INSTALL_PKGS="python-lxml pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless httpd-tools openssh-clients" \
+ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
+ && EPEL_PKGS="ansible python-passlib python2-boto" \
+ && yum install -y epel-release \
+ && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
+ && rpm -q $INSTALL_PKGS $EPEL_PKGS \
+ && yum clean all
+
LABEL name="openshift/origin-ansible" \
summary="OpenShift's installation and configuration tool" \
description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
@@ -12,40 +20,24 @@ LABEL name="openshift/origin-ansible" \
io.k8s.display-name="openshift-ansible" \
io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
io.openshift.expose-services="" \
- io.openshift.tags="openshift,install,upgrade,ansible"
+ io.openshift.tags="openshift,install,upgrade,ansible" \
+ atomic.run="once"
-USER root
+ENV USER_UID=1001 \
+ HOME=/opt/app-root/src \
+ WORK_DIR=/usr/share/ansible/openshift-ansible \
+ OPTS="-v"
-# Create a symlink to /opt/app-root/src so that files under /usr/share/ansible are accessible.
-# This is required since the system-container uses by default the playbook under
-# /usr/share/ansible/openshift-ansible. With this change we won't need to keep two different
-# configurations for the two images.
-RUN mkdir -p /usr/share/ansible/ && ln -s /opt/app-root/src /usr/share/ansible/openshift-ansible
+# Add image scripts and files for running as a system container
+COPY images/installer/root /
+# Include playbooks, roles, plugins, etc. from this repo
+COPY . ${WORK_DIR}
-RUN INSTALL_PKGS="skopeo openssl java-1.8.0-openjdk-headless httpd-tools" && \
- yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
- rpm -V $INSTALL_PKGS && \
- yum clean all
+RUN /usr/local/bin/user_setup \
+ && rm /usr/local/bin/usage.ocp
USER ${USER_UID}
-# The playbook to be run is specified via the PLAYBOOK_FILE env var.
-# This sets a default of openshift_facts.yml as it's an informative playbook
-# that can help test that everything is set properly (inventory, sshkeys)
-ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
- OPTS="-v" \
- INSTALL_OC=true
-
-# playbook2image's assemble script expects the source to be available in
-# /tmp/src (as per the source-to-image specs) so we import it there
-ADD . /tmp/src
-
-# Running the 'assemble' script provided by playbook2image will install
-# dependencies specified in requirements.txt and install the 'oc' client
-# as per the INSTALL_OC environment setting above
-RUN /usr/libexec/s2i/assemble
-
-# Add files for running as a system container
-COPY images/installer/system-container/root /
-
-CMD [ "/usr/libexec/s2i/run" ]
+WORKDIR ${WORK_DIR}
+ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
+CMD [ "/usr/local/bin/run" ]
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 9d7eeec24..3110f409c 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -1,7 +1,18 @@
-FROM openshift3/playbook2image
+FROM rhel7.3:7.3-released
MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
+USER root
+
+# Playbooks, roles, and their dependencies are installed from packages.
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto openssl java-1.8.0-openjdk-headless httpd-tools" \
+ && yum repolist > /dev/null \
+ && yum-config-manager --enable rhel-7-server-ose-3.6-rpms \
+ && yum-config-manager --enable rhel-7-server-rh-common-rpms \
+ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
+ && rpm -q $INSTALL_PKGS \
+ && yum clean all
+
LABEL name="openshift3/ose-ansible" \
summary="OpenShift's installation and configuration tool" \
description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
@@ -11,35 +22,25 @@ LABEL name="openshift3/ose-ansible" \
io.openshift.expose-services="" \
io.openshift.tags="openshift,install,upgrade,ansible" \
com.redhat.component="aos3-installation-docker" \
- version="v3.4.1" \
+ version="v3.6.0" \
release="1" \
- architecture="x86_64"
-
-# Playbooks, roles and their dependencies are installed from packages.
-# Unlike in Dockerfile, we don't invoke the 'assemble' script here
-# because all content and dependencies (like 'oc') is already
-# installed via yum.
-USER root
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto" && \
- yum repolist > /dev/null && \
- yum-config-manager --enable rhel-7-server-ose-3.4-rpms && \
- yum-config-manager --enable rhel-7-server-rh-common-rpms && \
- yum install -y $INSTALL_PKGS && \
- yum clean all
+ architecture="x86_64" \
+ atomic.run="once"
-USER ${USER_UID}
-
-# The playbook to be run is specified via the PLAYBOOK_FILE env var.
-# This sets a default of openshift_facts.yml as it's an informative playbook
-# that can help test that everything is set properly (inventory, sshkeys).
-# As the playbooks are installed via packages instead of being copied to
-# $APP_HOME by the 'assemble' script, we set the WORK_DIR env var to the
-# location of openshift-ansible.
-ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ENV USER_UID=1001 \
+ HOME=/opt/app-root/src \
WORK_DIR=/usr/share/ansible/openshift-ansible \
+ ANSIBLE_CONFIG=/usr/share/atomic-openshift-utils/ansible.cfg \
OPTS="-v"
-# Add files for running as a system container
-COPY system-container/root /
+# Add image scripts and files for running as a system container
+COPY root /
+
+RUN /usr/local/bin/user_setup \
+ && mv /usr/local/bin/usage{.ocp,}
+
+USER ${USER_UID}
-CMD [ "/usr/libexec/s2i/run" ]
+WORKDIR ${WORK_DIR}
+ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
+CMD [ "/usr/local/bin/run" ]
diff --git a/images/installer/system-container/README.md b/images/installer/README_CONTAINER_IMAGE.md
index fbcd47c4a..bc1ebb4a8 100644
--- a/images/installer/system-container/README.md
+++ b/images/installer/README_CONTAINER_IMAGE.md
@@ -1,17 +1,34 @@
-# System container installer
+ORIGIN-ANSIBLE IMAGE INSTALLER
+===============================
+
+Contains Dockerfile information for building an openshift/origin-ansible image
+based on `centos:7` or `rhel7.3:7.3-released`.
+
+Read additional setup information for this image at: https://hub.docker.com/r/openshift/origin-ansible/
+
+Read additional information about the `openshift/origin-ansible` image at: https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+Also contains necessary components for running the installer using an Atomic System Container.
+
+
+System container installer
+==========================
These files are needed to run the installer using an [Atomic System container](http://www.projectatomic.io/blog/2016/09/intro-to-system-containers/).
+These files can be found under `root/exports`:
* config.json.template - Template of the configuration file used for running containers.
-* manifest.json - Used to define various settings for the system container, such as the default values to use for the installation.
-
-* run-system-container.sh - Entrypoint to the container.
+* manifest.json - Used to define various settings for the system container, such as the default values to use for the installation.
* service.template - Template file for the systemd service.
* tmpfiles.template - Template file for systemd-tmpfiles.
+These files can be found under `root/usr/local/bin`:
+
+* run-system-container.sh - Entrypoint to the container.
+
## Options
These options may be set via the ``atomic`` ``--set`` flag. For defaults see ``root/exports/manifest.json``
@@ -28,4 +45,4 @@ These options may be set via the ``atomic`` ``--set`` flag. For defaults see ``r
* ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container
-* INVENTORY_FILE - Full path for the inventory to use from the host
+* INVENTORY_FILE - Full path for the inventory to use from the host \ No newline at end of file
diff --git a/images/installer/system-container/root/exports/config.json.template b/images/installer/root/exports/config.json.template
index 739c0080f..739c0080f 100644
--- a/images/installer/system-container/root/exports/config.json.template
+++ b/images/installer/root/exports/config.json.template
diff --git a/images/installer/system-container/root/exports/manifest.json b/images/installer/root/exports/manifest.json
index 321a84ee8..8b984d7a3 100644
--- a/images/installer/system-container/root/exports/manifest.json
+++ b/images/installer/root/exports/manifest.json
@@ -6,7 +6,7 @@
"VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
"PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
"HOME_ROOT": "/root",
- "ANSIBLE_CONFIG": "/usr/share/ansible/openshift-ansible/ansible.cfg",
+ "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
"INVENTORY_FILE": "/dev/null"
}
}
diff --git a/images/installer/system-container/root/exports/service.template b/images/installer/root/exports/service.template
index bf5316af6..bf5316af6 100644
--- a/images/installer/system-container/root/exports/service.template
+++ b/images/installer/root/exports/service.template
diff --git a/images/installer/system-container/root/exports/tmpfiles.template b/images/installer/root/exports/tmpfiles.template
index b1f6caf47..b1f6caf47 100644
--- a/images/installer/system-container/root/exports/tmpfiles.template
+++ b/images/installer/root/exports/tmpfiles.template
diff --git a/images/installer/root/usr/local/bin/entrypoint b/images/installer/root/usr/local/bin/entrypoint
new file mode 100755
index 000000000..777bf3f11
--- /dev/null
+++ b/images/installer/root/usr/local/bin/entrypoint
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+#
+# This file serves as the main entrypoint to the openshift-ansible image.
+#
+# For more information see the documentation:
+# https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+
+# Patch /etc/passwd file with the current user info.
+# The current user's entry must be correctly defined in this file in order for
+# the `ssh` command to work within the created container.
+
+if ! whoami &>/dev/null; then
+ echo "${USER:-default}:x:$(id -u):$(id -g):Default User:$HOME:/sbin/nologin" >> /etc/passwd
+fi
+
+exec "$@"
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
new file mode 100755
index 000000000..9401ea118
--- /dev/null
+++ b/images/installer/root/usr/local/bin/run
@@ -0,0 +1,46 @@
+#!/bin/bash -e
+#
+# This file serves as the default command to the openshift-ansible image.
+# Runs a playbook with inventory as specified by environment variables.
+#
+# For more information see the documentation:
+# https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+# SOURCE and HOME DIRECTORY: /opt/app-root/src
+
+if [[ -z "${PLAYBOOK_FILE}" ]]; then
+ echo
+ echo "PLAYBOOK_FILE must be provided."
+ exec /usr/local/bin/usage
+fi
+
+INVENTORY="$(mktemp)"
+if [[ -v INVENTORY_FILE ]]; then
+ # Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
+ # does not attempt to modify the original
+ cp -a ${INVENTORY_FILE} ${INVENTORY}
+elif [[ -v INVENTORY_URL ]]; then
+ curl -o ${INVENTORY} ${INVENTORY_URL}
+elif [[ -v DYNAMIC_SCRIPT_URL ]]; then
+ curl -o ${INVENTORY} ${DYNAMIC_SCRIPT_URL}
+ chmod 755 ${INVENTORY}
+else
+ echo
+ echo "One of INVENTORY_FILE, INVENTORY_URL or DYNAMIC_SCRIPT_URL must be provided."
+ exec /usr/local/bin/usage
+fi
+INVENTORY_ARG="-i ${INVENTORY}"
+
+if [[ "$ALLOW_ANSIBLE_CONNECTION_LOCAL" = false ]]; then
+ sed -i s/ansible_connection=local// ${INVENTORY}
+fi
+
+if [[ -v VAULT_PASS ]]; then
+ VAULT_PASS_FILE=.vaultpass
+ echo ${VAULT_PASS} > ${VAULT_PASS_FILE}
+ VAULT_PASS_ARG="--vault-password-file ${VAULT_PASS_FILE}"
+fi
+
+cd ${WORK_DIR}
+
+exec ansible-playbook ${INVENTORY_ARG} ${VAULT_PASS_ARG} ${OPTS} ${PLAYBOOK_FILE}
diff --git a/images/installer/system-container/root/usr/local/bin/run-system-container.sh b/images/installer/root/usr/local/bin/run-system-container.sh
index 9ce7c7328..9ce7c7328 100755
--- a/images/installer/system-container/root/usr/local/bin/run-system-container.sh
+++ b/images/installer/root/usr/local/bin/run-system-container.sh
diff --git a/images/installer/root/usr/local/bin/usage b/images/installer/root/usr/local/bin/usage
new file mode 100755
index 000000000..3518d7f19
--- /dev/null
+++ b/images/installer/root/usr/local/bin/usage
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+cat <<"EOF"
+
+The origin-ansible image provides several options to control the behaviour of the containers.
+For more details on these options see the documentation:
+
+ https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+At a minimum, when running a container using this image you must provide:
+
+* ssh keys so that Ansible can reach your hosts. These should be mounted as a volume under
+ /opt/app-root/src/.ssh
+* An inventory file. This can be mounted inside the container as a volume and specified with the
+ INVENTORY_FILE environment variable. Alternatively you can serve the inventory file from a web
+ server and use the INVENTORY_URL environment variable to fetch it.
+* The playbook to run. This is set using the PLAYBOOK_FILE environment variable.
+
+Here is an example of how to run a containerized origin-ansible with
+the openshift_facts playbook, which collects and displays facts about your
+OpenShift environment. The inventory and ssh keys are mounted as volumes
+(the latter requires setting the uid in the container and SELinux label
+in the key file via :Z so they can be accessed) and the PLAYBOOK_FILE
+environment variable is set to point to the playbook within the image:
+
+docker run -tu `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+ -v /etc/ansible/hosts:/tmp/inventory:Z,ro \
+ -e INVENTORY_FILE=/tmp/inventory \
+ -e OPTS="-v" \
+ -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ openshift/origin-ansible
+
+EOF
diff --git a/images/installer/root/usr/local/bin/usage.ocp b/images/installer/root/usr/local/bin/usage.ocp
new file mode 100755
index 000000000..50593af6e
--- /dev/null
+++ b/images/installer/root/usr/local/bin/usage.ocp
@@ -0,0 +1,33 @@
+#!/bin/bash -e
+cat <<"EOF"
+
+The ose-ansible image provides several options to control the behaviour of the containers.
+For more details on these options see the documentation:
+
+ https://github.com/openshift/openshift-ansible/blob/master/README_CONTAINER_IMAGE.md
+
+At a minimum, when running a container using this image you must provide:
+
+* ssh keys so that Ansible can reach your hosts. These should be mounted as a volume under
+ /opt/app-root/src/.ssh
+* An inventory file. This can be mounted inside the container as a volume and specified with the
+ INVENTORY_FILE environment variable. Alternatively you can serve the inventory file from a web
+ server and use the INVENTORY_URL environment variable to fetch it.
+* The playbook to run. This is set using the PLAYBOOK_FILE environment variable.
+
+Here is an example of how to run a containerized ose-ansible with
+the openshift_facts playbook, which collects and displays facts about your
+OpenShift environment. The inventory and ssh keys are mounted as volumes
+(the latter requires setting the uid in the container and SELinux label
+in the key file via :Z so they can be accessed) and the PLAYBOOK_FILE
+environment variable is set to point to the playbook within the image:
+
+docker run -tu `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \
+ -v /etc/ansible/hosts:/tmp/inventory:Z,ro \
+ -e INVENTORY_FILE=/tmp/inventory \
+ -e OPTS="-v" \
+ -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ openshift3/ose-ansible
+
+EOF
diff --git a/images/installer/root/usr/local/bin/user_setup b/images/installer/root/usr/local/bin/user_setup
new file mode 100755
index 000000000..b76e60a4d
--- /dev/null
+++ b/images/installer/root/usr/local/bin/user_setup
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -x
+
+# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
+mkdir -p ${HOME}
+chown ${USER_UID}:0 ${HOME}
+chmod ug+rwx ${HOME}
+
+# runtime user will need to be able to self-insert in /etc/passwd
+chmod g+rw /etc/passwd
+
+# ensure that the ansible content is accessible
+chmod -R g+r ${WORK_DIR}
+find ${WORK_DIR} -type d -exec chmod g+x {} +
+
+# no need for this script to remain in the image after running
+rm $0