-rw-r--r-- .tito/packages/openshift-ansible | 2
-rw-r--r-- Dockerfile | 2
-rw-r--r-- README.md | 4
-rw-r--r-- README_AEP.md | 14
-rw-r--r-- README_AWS.md | 2
-rw-r--r-- README_OSE.md | 6
-rw-r--r-- README_origin.md | 4
-rwxr-xr-x bin/cluster | 13
-rwxr-xr-x bin/ohi | 47
-rw-r--r-- bin/openshift_ansible/awsutil.py | 100
-rwxr-xr-x bin/opssh | 50
-rwxr-xr-x bin/oscp | 19
-rwxr-xr-x bin/ossh | 35
-rwxr-xr-x bin/ossh_bash_completion | 12
-rw-r--r-- bin/ossh_zsh_completion | 6
-rw-r--r-- bin/zsh_functions/_ossh | 2
-rw-r--r-- docs/best_practices_guide.adoc | 67
-rw-r--r-- docs/style_guide.adoc | 18
-rw-r--r-- filter_plugins/oo_filters.py | 58
-rw-r--r-- filter_plugins/openshift_master.py | 28
-rwxr-xr-x git/parent.py | 96
-rwxr-xr-x git/parent.rb | 45
-rwxr-xr-x git/yaml_validation.py | 73
-rwxr-xr-x git/yaml_validation.rb | 72
-rw-r--r-- inventory/aws/hosts/ec2.ini | 4
-rw-r--r-- inventory/byo/hosts.aep.example | 27
-rw-r--r-- inventory/byo/hosts.origin.example | 27
-rw-r--r-- inventory/byo/hosts.ose.example | 27
-rw-r--r-- openshift-ansible.spec | 128
-rw-r--r-- playbooks/adhoc/bootstrap-fedora.yml | 2
-rw-r--r-- playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 4
-rw-r--r-- playbooks/adhoc/s3_registry/s3_registry.j2 | 7
-rw-r--r-- playbooks/adhoc/s3_registry/s3_registry.yml | 6
-rw-r--r-- playbooks/adhoc/uninstall.yml | 4
-rw-r--r-- playbooks/aws/openshift-cluster/add_nodes.yml (renamed from playbooks/aws/openshift-cluster/addNodes.yml) | 0
-rw-r--r-- playbooks/aws/openshift-cluster/cluster_hosts.yml | 27
-rw-r--r-- playbooks/aws/openshift-cluster/vars.yml | 29
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md | 17
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 14
l--------- playbooks/byo/openshift-master/filter_plugins | 1
l--------- playbooks/byo/openshift-master/lookup_plugins | 1
-rw-r--r-- playbooks/byo/openshift-master/restart.yml | 4
l--------- playbooks/byo/openshift-master/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/update_repos_and_packages.yml | 2
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/versions.sh | 4
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 48
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_minor/library | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 50
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 87
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 137
-rw-r--r-- playbooks/common/openshift-etcd/config.yml | 1
-rw-r--r-- playbooks/common/openshift-master/config.yml | 10
-rw-r--r-- playbooks/common/openshift-master/restart.yml | 151
-rw-r--r-- playbooks/common/openshift-master/restart_hosts.yml | 39
-rw-r--r-- playbooks/common/openshift-master/restart_hosts_pacemaker.yml | 25
-rw-r--r-- playbooks/common/openshift-master/restart_services.yml | 27
-rw-r--r-- playbooks/common/openshift-master/restart_services_pacemaker.yml | 10
-rw-r--r-- playbooks/common/openshift-node/config.yml | 65
-rw-r--r-- playbooks/gce/openshift-cluster/cluster_hosts.yml | 27
-rw-r--r-- playbooks/gce/openshift-cluster/config.yml | 1
-rw-r--r-- playbooks/gce/openshift-cluster/join_node.yml | 2
-rw-r--r-- playbooks/gce/openshift-cluster/vars.yml | 18
-rw-r--r-- playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 27
-rw-r--r-- playbooks/libvirt/openshift-cluster/vars.yml | 31
-rw-r--r-- playbooks/openstack/openshift-cluster/cluster_hosts.yml | 27
-rw-r--r-- playbooks/openstack/openshift-cluster/vars.yml | 12
-rw-r--r-- roles/chrony/README.md | 31
-rw-r--r-- roles/chrony/defaults/main.yml | 2
-rw-r--r-- roles/chrony/handlers/main.yml | 5
-rw-r--r-- roles/chrony/meta/main.yml | 18
-rw-r--r-- roles/chrony/tasks/main.yml | 30
-rw-r--r-- roles/chrony/templates/chrony.conf.j2 | 45
-rw-r--r-- roles/chrony/vars/main.yml | 2
-rw-r--r-- roles/cockpit/tasks/main.yml | 2
-rw-r--r-- roles/etcd/tasks/main.yml | 16
-rw-r--r-- roles/etcd_ca/tasks/main.yml | 4
-rw-r--r-- roles/fluentd_master/tasks/main.yml | 4
-rw-r--r-- roles/fluentd_node/tasks/main.yml | 5
-rw-r--r-- roles/haproxy/defaults/main.yml | 4
-rw-r--r-- roles/haproxy/handlers/main.yml | 1
-rw-r--r-- roles/haproxy/tasks/main.yml | 5
-rw-r--r-- roles/lib_timedatectl/library/timedatectl.py | 74
-rw-r--r-- roles/lib_zabbix/library/zbx_action.py | 147
-rw-r--r-- roles/lib_zabbix/library/zbx_host.py | 21
-rw-r--r-- roles/lib_zabbix/tasks/create_template.yml | 1
-rw-r--r-- roles/nickhammond.logrotate/tasks/main.yml | 1
-rw-r--r-- roles/nuage_master/README.md | 8
-rw-r--r-- roles/nuage_master/files/serviceaccount.sh | 63
-rw-r--r-- roles/nuage_master/handlers/main.yaml | 18
-rw-r--r-- roles/nuage_master/tasks/main.yaml | 34
-rw-r--r-- roles/nuage_master/templates/nuagekubemon.j2 | 19
-rw-r--r-- roles/nuage_master/vars/main.yaml | 7
-rw-r--r-- roles/nuage_node/README.md | 9
-rw-r--r-- roles/nuage_node/handlers/main.yaml | 8
-rw-r--r-- roles/nuage_node/tasks/main.yaml | 37
-rw-r--r-- roles/nuage_node/templates/vsp-k8s.j2 | 14
-rw-r--r-- roles/nuage_node/vars/main.yaml | 9
-rw-r--r-- roles/openshift_cli/tasks/main.yml | 19
-rw-r--r-- roles/openshift_common/tasks/main.yml | 13
-rw-r--r-- roles/openshift_common/vars/main.yml | 1
-rwxr-xr-x roles/openshift_facts/library/openshift_facts.py | 133
-rw-r--r-- roles/openshift_facts/tasks/main.yml | 13
-rw-r--r-- roles/openshift_master/defaults/main.yml | 6
-rw-r--r-- roles/openshift_master/handlers/main.yml | 13
-rw-r--r-- roles/openshift_master/tasks/main.yml | 161
-rw-r--r-- roles/openshift_master/templates/atomic-openshift-master.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-api.j2) | 4
l--------- roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 | 1
-rw-r--r-- roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2) | 2
l--------- roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 | 1
-rw-r--r-- roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2) | 0
-rw-r--r-- roles/openshift_master/templates/docker/master.docker.service.j2 (renamed from roles/openshift_master/templates/master.docker.service.j2) | 0
-rw-r--r-- roles/openshift_master/templates/master.yaml.v1.j2 | 23
-rw-r--r-- roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 9
-rw-r--r-- roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-api.service.j2) | 0
-rw-r--r-- roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-controllers.j2) | 4
-rw-r--r-- roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 (renamed from roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2) | 0
-rw-r--r-- roles/openshift_master/vars/main.yml | 5
-rw-r--r-- roles/openshift_master_ca/tasks/main.yml | 10
-rw-r--r-- roles/openshift_master_cluster/tasks/configure.yml | 3
-rw-r--r-- roles/openshift_node/tasks/main.yml | 44
-rw-r--r-- roles/openshift_node/tasks/storage_plugins/nfs.yml | 4
-rw-r--r-- roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r-- roles/os_firewall/defaults/main.yml | 1
-rw-r--r-- roles/os_firewall/tasks/main.yml | 4
-rw-r--r-- roles/os_zabbix/tasks/main.yml | 18
-rw-r--r-- roles/os_zabbix/vars/template_config_loop.yml | 14
-rw-r--r-- roles/os_zabbix/vars/template_docker.yml | 2
-rw-r--r-- roles/os_zabbix/vars/template_openshift_master.yml | 26
-rw-r--r-- roles/oso_host_monitoring/handlers/main.yml | 6
-rw-r--r-- roles/oso_host_monitoring/tasks/main.yml | 20
-rw-r--r-- roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 | 43
-rw-r--r-- roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 (renamed from roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2) | 50
-rw-r--r-- roles/oso_monitoring_tools/README.md | 54
-rw-r--r-- roles/oso_monitoring_tools/defaults/main.yml | 2
-rw-r--r-- roles/oso_monitoring_tools/handlers/main.yml | 2
-rw-r--r-- roles/oso_monitoring_tools/meta/main.yml | 8
-rw-r--r-- roles/oso_monitoring_tools/tasks/main.yml | 18
-rw-r--r-- roles/oso_monitoring_tools/vars/main.yml | 12
-rw-r--r-- roles/rhel_subscribe/tasks/enterprise.yml | 18
-rw-r--r-- roles/rhel_subscribe/tasks/main.yml | 2
-rw-r--r-- utils/src/ooinstall/cli_installer.py | 27
-rw-r--r-- utils/src/ooinstall/oo_config.py | 4
-rw-r--r-- utils/src/ooinstall/openshift_ansible.py | 15
-rw-r--r-- utils/test/cli_installer_tests.py | 42
-rw-r--r-- utils/test/fixture.py | 16
148 files changed, 2565 insertions(+), 868 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index caa4ded81..ead513c3d 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.26-1 ./
+3.0.36-1 ./
diff --git a/Dockerfile b/Dockerfile
index ab16ca609..02ab51680 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,7 +6,7 @@ RUN yum -y install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.no
# Not sure if all of these packages are necessary
# only git and ansible are known requirements
-RUN yum install -y --enablerepo rhel-7-server-extras-rpms net-tools bind-utils git ansible
+RUN yum install -y --enablerepo rhel-7-server-extras-rpms net-tools bind-utils git ansible pyOpenSSL
ADD ./ /opt/openshift-ansible/
diff --git a/README.md b/README.md
index cef2ed0b6..d05e3992e 100644
--- a/README.md
+++ b/README.md
@@ -6,11 +6,11 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
- Install base dependencies:
- Fedora:
```
- dnf install -y ansible rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
+ dnf install -y ansible-1.9.4 rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
```
- OSX:
```
- # Install ansible and python 2
+ # Install ansible 1.9.4 and python 2
brew install ansible python
# Required ruby gems
gem install thor parseconfig
diff --git a/README_AEP.md b/README_AEP.md
index 584a7afff..739c4baeb 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -10,19 +10,17 @@
* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
## Requirements
-* ansible
- * Tested using ansible 1.9.1 and 1.9.2
- * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+* ansible 1.9.4
* Available in Fedora channels
* Available for EL with EPEL and Optional channel
* One or more RHEL 7.1 VMs
* Either ssh key based auth for the root user or ssh key based auth for a user
with sudo access (no password)
-* A checkout of atomic-enterprise-ansible from https://github.com/projectatomic/atomic-enterprise-ansible/
+* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
```sh
- git clone https://github.com/projectatomic/atomic-enterprise-ansible.git
- cd atomic-enterprise-ansible
+ git clone https://github.com/openshift/openshift-ansible.git
+ cd openshift-ansible
```
## Caveats
@@ -81,10 +79,10 @@ deployment_type=atomic-enterprise
# Pre-release registry URL; note that in the future these images
# may have an atomicenterprise/aep- prefix or so.
-oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+oreg_url=rcm-img-docker:5001/openshift3/ose-${component}:${version}
# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
# host group for masters
[masters]
diff --git a/README_AWS.md b/README_AWS.md
index f8ecaec49..c605de43d 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -51,7 +51,7 @@ to setup a private key file to allow ansible to connect to the created hosts.
To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to log in on AWS.
```
Host *.compute-1.amazonaws.com
- PrivateKey $HOME/.ssh/my_private_key.pem
+ IdentityFile $HOME/.ssh/my_private_key.pem
```
Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
diff --git a/README_OSE.md b/README_OSE.md
index 66fba33e5..f7615ae38 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -10,9 +10,7 @@
* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
## Requirements
-* ansible
- * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
- * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+* ansible 1.9.4
* Available in Fedora channels
* Available for EL with EPEL and Optional channel
* One or more RHEL 7.1 VMs
@@ -82,7 +80,7 @@ deployment_type=enterprise
# Pre-release additional repo
openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
'baseurl':
-'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'http://buildvm/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
'enabled': 1, 'gpgcheck': 0}]
# Origin copr repo
diff --git a/README_origin.md b/README_origin.md
index 0387e213f..761d1509f 100644
--- a/README_origin.md
+++ b/README_origin.md
@@ -10,9 +10,7 @@
* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
## Requirements
-* ansible
- * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
- * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+* ansible 1.9.4
* Available in Fedora channels
* Available for EL with EPEL and Optional channel
* One or more RHEL 7.1+, CentOS 7.1+, or Fedora 23+ VMs
diff --git a/bin/cluster b/bin/cluster
index 3081ebd4a..c3b101c98 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -68,7 +68,7 @@ class Cluster(object):
self.action(args, inventory, cluster, playbook)
- def addNodes(self, args):
+ def add_nodes(self, args):
"""
Add nodes to an existing cluster for given provider
:param args: command line arguments provided by user
@@ -76,7 +76,7 @@ class Cluster(object):
cluster = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args),
}
- playbook = "playbooks/{0}/openshift-cluster/addNodes.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
cluster['num_nodes'] = args.nodes
@@ -294,11 +294,8 @@ if __name__ == '__main__':
meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
meta_parser.add_argument('-t', '--deployment-type',
- choices=['origin', 'online', 'enterprise'],
+ choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'],
help='Deployment type. (default: origin)')
- meta_parser.add_argument('-T', '--product-type',
- choices=['openshift', 'atomic-enterprise'],
- help='Product type. (default: openshift)')
meta_parser.add_argument('-o', '--option', action='append',
help='options')
@@ -324,13 +321,13 @@ if __name__ == '__main__':
create_parser.set_defaults(func=cluster.create)
- create_parser = action_parser.add_parser('addNodes', help='Add nodes to a cluster',
+ create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster',
parents=[meta_parser])
create_parser.add_argument('-n', '--nodes', default=1, type=int,
help='number of nodes to add to the cluster')
create_parser.add_argument('-i', '--infra', default=1, type=int,
help='number of infra nodes to add to the cluster')
- create_parser.set_defaults(func=cluster.addNodes)
+ create_parser.set_defaults(func=cluster.add_nodes)
config_parser = action_parser.add_parser('config',
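A minimal standalone sketch of the argparse wiring above (not part of the commit): the subcommand keeps the hyphenated CLI name `add-nodes` while the handler follows the snake_case rename to `add_nodes`.

```python
#!/usr/bin/env python
# Hypothetical sketch: hyphenated subcommand name, snake_case handler.
import argparse

def add_nodes(args):
    print("adding %d compute and %d infra node(s)" % (args.nodes, args.infra))

parser = argparse.ArgumentParser(description='cluster CLI sketch')
action_parser = parser.add_subparsers(dest='action')
create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster')
create_parser.add_argument('-n', '--nodes', default=1, type=int,
                           help='number of nodes to add to the cluster')
create_parser.add_argument('-i', '--infra', default=1, type=int,
                           help='number of infra nodes to add to the cluster')
create_parser.set_defaults(func=add_nodes)

args = parser.parse_args(['add-nodes', '-n', '2'])
args.func(args)  # -> adding 2 compute and 1 infra node(s)
```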
diff --git a/bin/ohi b/bin/ohi
index be9c53ec0..d71a4c4b1 100755
--- a/bin/ohi
+++ b/bin/ohi
@@ -48,28 +48,18 @@ class Ohi(object):
self.aws.print_host_types()
return 0
- hosts = None
- if self.args.host_type is not None and \
- self.args.env is not None:
- # Both env and host-type specified
- hosts = self.aws.get_host_list(host_type=self.args.host_type,
- envs=self.args.env,
- version=self.args.openshift_version,
- cached=self.args.cache_only)
-
- if self.args.host_type is None and \
- self.args.env is not None:
- # Only env specified
- hosts = self.aws.get_host_list(envs=self.args.env,
- version=self.args.openshift_version,
- cached=self.args.cache_only)
-
- if self.args.host_type is not None and \
- self.args.env is None:
- # Only host-type specified
- hosts = self.aws.get_host_list(host_type=self.args.host_type,
- version=self.args.openshift_version,
- cached=self.args.cache_only)
+ if self.args.v3:
+ version = '3'
+ elif self.args.all_versions:
+ version = 'all'
+ else:
+ version = '2'
+
+ hosts = self.aws.get_host_list(clusters=self.args.cluster,
+ host_type=self.args.host_type,
+ envs=self.args.env,
+ version=version,
+ cached=self.args.cache_only)
if hosts is None:
# We weren't able to determine what they wanted to do
@@ -104,19 +94,26 @@ class Ohi(object):
parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types')
+ parser.add_argument('--list', default=False, action='store_true', help='List all hosts')
- parser.add_argument('-e', '--env', action="store", help="Which environment to use")
+ parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use")
+ parser.add_argument('-e', '--env', action="append", help="Which environment to use")
parser.add_argument('-t', '--host-type', action="store", help="Which host type to use")
parser.add_argument('-l', '--user', action='store', default=None, help='username')
- parser.add_argument('-c', '--cache-only', action='store_true', default=False,
+ parser.add_argument('--cache-only', action='store_true', default=False,
help='Retrieve the host inventory by cache only. Default is false.')
- parser.add_argument('-o', '--openshift-version', action='store', default='2',
+ parser.add_argument('--v2', action='store_true', default=True,
help='Specify the openshift version. Default is 2')
+ parser.add_argument('--v3', action='store_true', default=False,
+ help='Specify the openshift version.')
+
+ parser.add_argument('--all-versions', action='store_true', default=False,
+ help='Specify the openshift version. Return all versions')
self.args = parser.parse_args()
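A short sketch of the flag-to-version mapping ohi now uses (an illustration, not the tool itself): since `--v2` defaults to True it is effectively documentation, `--v3` and `--all-versions` take precedence, and a plain invocation resolves to '2'.

```python
# Hypothetical helper mirroring the branch logic in Ohi.main() above.
def resolve_version(v3=False, all_versions=False):
    if v3:
        return '3'
    elif all_versions:
        return 'all'
    return '2'

assert resolve_version() == '2'
assert resolve_version(v3=True) == '3'
assert resolve_version(all_versions=True) == 'all'
```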
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 76b4f4f51..3639ef733 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -59,9 +59,23 @@ class AwsUtil(object):
minv.run()
return minv.result
+ def get_clusters(self):
+ """Searches for cluster tags in the inventory and returns all of the clusters found."""
+ pattern = re.compile(r'^oo_clusterid_(.*)')
+
+ clusters = []
+ inv = self.get_inventory()
+ for key in inv.keys():
+ matched = pattern.match(key)
+ if matched:
+ clusters.append(matched.group(1))
+
+ clusters.sort()
+ return clusters
+
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
- pattern = re.compile(r'^tag_environment_(.*)')
+ pattern = re.compile(r'^oo_environment_(.*)')
envs = []
inv = self.get_inventory()
@@ -75,7 +89,7 @@ class AwsUtil(object):
def get_host_types(self):
"""Searches for host-type tags in the inventory and returns all host-types found."""
- pattern = re.compile(r'^tag_host-type_(.*)')
+ pattern = re.compile(r'^oo_hosttype_(.*)')
host_types = []
inv = self.get_inventory()
@@ -154,52 +168,68 @@ class AwsUtil(object):
return host_type
@staticmethod
+ def gen_version_tag(ver):
+ """Generate the version tag
+ """
+ return "oo_version_%s" % ver
+
+ @staticmethod
+ def gen_clusterid_tag(clu):
+ """Generate the clusterid tag
+ """
+ return "oo_clusterid_%s" % clu
+
+ @staticmethod
def gen_env_tag(env):
"""Generate the environment tag
"""
- return "tag_environment_%s" % env
+ return "oo_environment_%s" % env
- def gen_host_type_tag(self, host_type):
+ def gen_host_type_tag(self, host_type, version):
"""Generate the host type tag
"""
- host_type = self.resolve_host_type(host_type)
- return "tag_host-type_%s" % host_type
+ if version == '2':
+ host_type = self.resolve_host_type(host_type)
+ return "oo_hosttype_%s" % host_type
- def get_host_list(self, host_type=None, envs=None, version=None, cached=False):
+ # This function uses all of these params to filter our host inventory.
+ # pylint: disable=too-many-arguments
+ def get_host_list(self, clusters=None, host_type=None, envs=None, version=None, cached=False):
"""Get the list of hosts from the inventory using host-type and environment
"""
retval = set([])
envs = envs or []
+
inv = self.get_inventory(cached=cached)
- # We prefer to deal with a list of environments
- if issubclass(type(envs), basestring):
- if envs == 'all':
- envs = self.get_environments()
+ retval.update(inv.get('all_hosts', []))
+
+ if clusters:
+ cluster_hosts = set([])
+ if len(clusters) > 1:
+ for cluster in clusters:
+ clu_tag = AwsUtil.gen_clusterid_tag(cluster)
+ cluster_hosts.update(inv.get(clu_tag, []))
+ else:
+ cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
+
+ retval.intersection_update(cluster_hosts)
+
+ if envs:
+ env_hosts = set([])
+ if len(envs) > 1:
+ for env in envs:
+ env_tag = AwsUtil.gen_env_tag(env)
+ env_hosts.update(inv.get(env_tag, []))
else:
- envs = [envs]
-
- if host_type and envs:
- # Both host type and environment were specified
- for env in envs:
- retval.update(inv.get('tag_environment_%s' % env, []))
- retval.intersection_update(inv.get(self.gen_host_type_tag(host_type), []))
-
- elif envs and not host_type:
- # Just environment was specified
- for env in envs:
- env_tag = AwsUtil.gen_env_tag(env)
- if env_tag in inv.keys():
- retval.update(inv.get(env_tag, []))
-
- elif host_type and not envs:
- # Just host-type was specified
- host_type_tag = self.gen_host_type_tag(host_type)
- if host_type_tag in inv.keys():
- retval.update(inv.get(host_type_tag, []))
-
- # If version is specified then return only hosts in that version
- if version:
- retval.intersection_update(inv.get('oo_version_%s' % version, []))
+ env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
+
+ retval.intersection_update(env_hosts)
+
+ if host_type:
+ retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), []))
+
+ if version != 'all':
+ retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
return retval
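The rewritten `get_host_list` replaces the old if/elif ladder with set intersections: start from all hosts, then narrow by each requested tag group. A self-contained illustration with hypothetical inventory data:

```python
# Sample inventory dict (made up for illustration, not real output).
inventory = {
    'all_hosts': ['host1', 'host2', 'host3', 'host4'],
    'oo_clusterid_prod': ['host1', 'host2'],
    'oo_environment_int': ['host2', 'host3'],
    'oo_version_3': ['host2', 'host4'],
}

retval = set(inventory.get('all_hosts', []))
retval.intersection_update(inventory.get('oo_clusterid_prod', []))   # clusters
retval.intersection_update(inventory.get('oo_environment_int', []))  # envs
retval.intersection_update(inventory.get('oo_version_3', []))        # version
print(retval)  # only host2 carries all three tags
```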
diff --git a/bin/opssh b/bin/opssh
index 8ac526049..7a2ffdb1d 100755
--- a/bin/opssh
+++ b/bin/opssh
@@ -13,7 +13,10 @@ Options:
-p PAR, --par=PAR max number of parallel threads (OPTIONAL)
--outdir=OUTDIR output directory for stdout files (OPTIONAL)
--errdir=ERRDIR output directory for stderr files (OPTIONAL)
+ -c CLUSTER, --cluster CLUSTER
+ which cluster to use
-e ENV, --env ENV which environment to use
+ --v3 When working with v3 environments. v2 by default
-t HOST_TYPE, --host-type HOST_TYPE
which host type to use
--list-host-types list all of the host types
@@ -45,9 +48,9 @@ fi
# See if ohi is installed
if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
+ echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
- exit 10
+ exit 10
fi
PAR=200
@@ -64,12 +67,23 @@ while [ $# -gt 0 ] ; do
shift # get past the value of the option
;;
+ -c)
+ shift # get past the option
+ CLUSTER=$1
+ shift # get past the value of the option
+ ;;
+
-e)
shift # get past the option
ENV=$1
shift # get past the value of the option
;;
+ --v3)
+ OPENSHIFT_VERSION="--v3"
+ shift # get past the value of the option
+ ;;
+
--timeout)
shift # get past the option
TIMEOUT=$1
@@ -106,20 +120,26 @@ while [ $# -gt 0 ] ; do
done
# Get host list from ohi
-if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$ENV" ] ; then
- HOSTS="$(ohi -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)"
+CMD=""
+if [ -n "$CLUSTER" ] ; then
+ CMD="$CMD -c $CLUSTER"
+fi
+
+if [ -n "$ENV" ] ; then
+ CMD="$CMD -e $ENV"
+fi
+
+if [ -n "$HOST_TYPE" ] ; then
+ CMD="$CMD -t $HOST_TYPE"
+fi
+
+if [ -n "$OPENSHIFT_VERSION" ] ; then
+ CMD="$CMD $OPENSHIFT_VERSION"
+fi
+
+if [ -n "$CMD" ] ; then
+ HOSTS="$(ohi $CMD 2>/dev/null)"
OHI_ECODE=$?
-else
- echo
- echo "Error: either -e or -t must be specified"
- echo
- exit 10
fi
if [ $OHI_ECODE -ne 0 ] ; then
diff --git a/bin/oscp b/bin/oscp
index c79fc8785..b15133642 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -138,7 +138,7 @@ class Oscp(object):
# attempt to select the correct environment if specified
if self.env:
- results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
+ results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
if results:
return results
@@ -164,10 +164,8 @@ class Oscp(object):
print '{0:<35} {1}'.format(key, server_info[key])
else:
for host_id, server_info in results[:limit]:
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
if limit:
print
@@ -177,10 +175,9 @@ class Oscp(object):
else:
for env, host_ids in self.host_inventory.items():
for host_id, server_info in host_ids.items():
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
+
def scp(self):
'''scp files to or from a specified host
@@ -209,12 +206,12 @@ class Oscp(object):
if len(results) > 1:
print "Multiple results found for %s." % self.host
for result in results:
- print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
+ print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
return # early exit, too many results
# Assume we have one and only one.
hostname, server_info = results[0]
- dns = server_info['ec2_public_dns_name']
+ dns = server_info['oo_public_ip']
host_str = "%s%s%s" % (self.user, dns, self.path)
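A sketch of the column formatting used by the new listing code above: the width specifiers keep the columns aligned, and `**` unpacks the per-host dict directly into `format()`. The values below are made up for illustration.

```python
# Hypothetical per-host record using the new oo_* keys.
server_info = {
    'oo_name': 'ex1-node-compute-23456',
    'oo_clusterid': 'ex1',
    'oo_environment': 'int',
    'oo_id': 'i-0123456789',
    'oo_public_ip': '172.16.0.3',
    'oo_private_ip': '192.168.0.3',
}
print('{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} '
      '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info))
```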
diff --git a/bin/ossh b/bin/ossh
index 50fa996c3..5e2506638 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -55,15 +55,15 @@ class Ossh(object):
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
parser.add_argument('-e', '--env', action="store",
- help="Which environment to search for the host ")
+ help="Which environment to search for the host ")
parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
+ action="store_true", help="debug mode")
parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
+ action="store_true", help="Verbose?")
parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
+ action="store_true", help="Force a refresh on the host cache.")
parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
+ action="store_true", help="list out hosts")
parser.add_argument('-c', '--command', action='store',
help='Command to run on remote host')
parser.add_argument('-l', '--login_name', action='store',
@@ -72,6 +72,8 @@ class Ossh(object):
parser.add_argument('-o', '--ssh_opts', action='store',
help='options to pass to SSH.\n \
"-oForwardX11=yes,TCPKeepAlive=yes"')
+ parser.add_argument('-A', default=False, action="store_true",
+ help='Forward authentication agent')
parser.add_argument('host', nargs='?', default='')
self.args = parser.parse_args()
@@ -127,7 +129,7 @@ class Ossh(object):
# attempt to select the correct environment if specified
if self.env:
- results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
+ results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
if results:
return results
@@ -153,10 +155,8 @@ class Ossh(object):
print '{0:<35} {1}'.format(key, server_info[key])
else:
for host_id, server_info in results[:limit]:
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
if limit:
print
@@ -166,10 +166,8 @@ class Ossh(object):
else:
for env, host_ids in self.host_inventory.items():
for host_id, server_info in host_ids.items():
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
def ssh(self):
'''SSH to a specified host
@@ -181,6 +179,9 @@ class Ossh(object):
if self.user:
ssh_args.append('-l%s' % self.user)
+ if self.args.A:
+ ssh_args.append('-A')
+
if self.args.verbose:
ssh_args.append('-vvv')
@@ -195,12 +196,12 @@ class Ossh(object):
if len(results) > 1:
print "Multiple results found for %s." % self.host
for result in results:
- print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
+ print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
return # early exit, too many results
# Assume we have one and only one.
- hostname, server_info = results[0]
- dns = server_info['ec2_public_dns_name']
+ _, server_info = results[0]
+ dns = server_info['oo_public_ip']
ssh_args.append(dns)
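How the new `-A` option feeds the eventual ssh call, as a sketch with illustrative values (the real tool resolves the address from the inventory): options accumulate in an argument list and the resolved address goes last.

```python
# Illustrative values; the real code pulls these from parsed args.
ssh_args = ['/usr/bin/ssh']
user, forward_agent, verbose = 'root', True, False
if user:
    ssh_args.append('-l%s' % user)
if forward_agent:
    ssh_args.append('-A')        # forward the authentication agent
if verbose:
    ssh_args.append('-vvv')
ssh_args.append('203.0.113.10')  # the matched host's oo_public_ip
print(' '.join(ssh_args))        # /usr/bin/ssh -lroot -A 203.0.113.10
```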
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 440fa0a45..77b770a43 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
fi
}
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
__opssh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
fi
}
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index f9454357b..170ca889b 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index e34ca5bd4..65979c58a 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -2,7 +2,7 @@
_ossh_known_hosts(){
if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
fi
}
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index 6b744333c..267aa850d 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -13,9 +13,12 @@ This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
== Pull Requests
+
+
+[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>>
| All pull requests MUST pass the build bot *before* they are merged.
|===
@@ -30,9 +33,10 @@ The tooling is flexible enough that exceptions can be made so that the tool the
=== Python Source Files
'''
+[[Python-source-files-MUST-contain-the-following-vim-mode-line]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>>
| Python source files MUST contain the following vim mode line.
|===
@@ -48,9 +52,10 @@ If mode lines for other editors are needed, please open a GitHub issue.
=== Method Signatures
'''
+[[When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used]]
[cols="2v,v"]
|===
-| **Rule**
+| <<When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
| When adding a new parameter to an existing method, a default value SHOULD be used
|===
The purpose of this rule is to make it so that method signatures are backwards compatible.
@@ -74,18 +79,20 @@ def add_person(first_name, last_name, age=None):
http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
'''
+[[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]]
[cols="2v,v"]
|===
-| **Rule**
+| <<PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file, Rule>>
| PyLint rules MUST NOT be disabled on a whole file.
|===
Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-particular-message[disable the PyLint check on the line where PyLint is complaining].
'''
+[[PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions]]
[cols="2v,v"]
|===
-| **Rule**
+| <<PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions, Rule>>
| PyLint rules MUST NOT be disabled unless they meet one of the following exceptions
|===
@@ -95,9 +102,10 @@ Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-par
1. When PyLint fails, but the code makes more sense the way it is formatted (stylistic exception). For this exception, the description of the PyLint disable MUST state why the code is more clear, AND the person reviewing the PR will decide if they agree or not. The reviewer may reject the PR if they disagree with the reason for the disable.
'''
+[[All-PyLint-rule-disables-MUST-be-documented-in-the-code]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-PyLint-rule-disables-MUST-be-documented-in-the-code, Rule>>
| All PyLint rule disables MUST be documented in the code.
|===
@@ -124,9 +132,10 @@ metadata[line] = results.pop()
=== Yaml Files (Playbooks, Roles, Vars, etc)
'''
+[[Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead, Rule>>
| Ansible files SHOULD NOT use JSON (use pure YAML instead).
|===
@@ -144,9 +153,10 @@ Every effort should be made to keep our Ansible YAML files in pure YAML.
=== Modules
'''
+[[Custom-Ansible-modules-SHOULD-be-embedded-in-a-role]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Custom-Ansible-modules-SHOULD-be-embedded-in-a-role, Rule>>
| Custom Ansible modules SHOULD be embedded in a role.
|===
@@ -177,9 +187,10 @@ The purpose of this rule is to make it easy to include custom modules in our pla
'''
+[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed, Rule>>
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
|===
@@ -204,9 +215,10 @@ When a module has several parameters that are being passed in, it's hard to see
'''
+[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters, Rule>>
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
|===
@@ -228,9 +240,10 @@ Lines that are long quickly become a wall of text that isn't easily parsable. It
----
'''
+[[The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module, Rule>>
| The Ansible `command` module SHOULD be used instead of the Ansible `shell` module.
|===
.Context
@@ -251,9 +264,10 @@ The Ansible `shell` module can run most commands that can be run from a bash CLI
----
'''
+[[The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module, Rule>>
| The Ansible `quote` filter MUST be used with any variable passed into the shell module.
|===
.Context
@@ -279,9 +293,10 @@ It is recommended not to use the `shell` module. However, if it absolutely must
* http://docs.ansible.com/fail_module.html[Ansible Fail Module]
'''
+[[Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>>
| Ansible playbooks MUST begin with checks for any variables that they require.
|===
@@ -299,9 +314,10 @@ If an Ansible playbook requires certain variables to be set, it's best to check
----
'''
+[[Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>>
| Ansible roles tasks/main.yml file MUST begin with checks for any variables that they require.
|===
@@ -318,9 +334,10 @@ If an Ansible role requires certain variables to be set, it's best to check for
=== Tasks
'''
+[[Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks, Rule>>
| Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks.
|===
An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles.
@@ -370,9 +387,10 @@ Therefore, we SHOULD use pre_tasks and post_tasks to make it more clear when the
=== Roles
'''
+[[All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name, Rule>>
| All tasks in a role SHOULD be tagged with the role name.
|===
@@ -395,9 +413,10 @@ This is very useful when developing and debugging new tasks. It can also signifi
'''
+[[The-Ansible-roles-directory-MUST-maintain-a-flat-structure]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-Ansible-roles-directory-MUST-maintain-a-flat-structure, Rule>>
| The Ansible roles directory MUST maintain a flat structure.
|===
@@ -410,9 +429,10 @@ This is very useful when developing and debugging new tasks. It can also signifi
* Make it compatible with Ansible Galaxy
'''
+[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>>
| Ansible Roles SHOULD be named like technology_component[_subcomponent].
|===
@@ -430,9 +450,10 @@ Many times the `technology` portion of the pattern will line up with a package n
* http://jinja.pocoo.org/docs/dev/templates/#builtin-filters[Jinja2 Builtin Filters]
'''
+[[The-default-filter-SHOULD-replace-empty-strings-lists-etc]]
[cols="2v,v"]
|===
-| **Rule**
+| <<The-default-filter-SHOULD-replace-empty-strings-lists-etc, Rule>>
| The `default` filter SHOULD replace empty strings, lists, etc.
|===
@@ -469,15 +490,17 @@ This is almost always more desirable than an empty list, string, etc.
=== Yum and DNF
'''
+[[Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum, Rule>>
| Package installation MUST use ansible action module to abstract away dnf/yum.
-| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
|===
+
+[[Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively, Rule>>
| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
|===
diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc
index 09d4839c7..72eaedcf9 100644
--- a/docs/style_guide.adoc
+++ b/docs/style_guide.adoc
@@ -19,9 +19,10 @@ This style guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
* https://www.python.org/dev/peps/pep-0008/#maximum-line-length[Python Pep8 Line Length]
'''
+[[All-lines-SHOULD-be-no-longer-than-80-characters]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-lines-SHOULD-be-no-longer-than-80-characters, Rule>>
| All lines SHOULD be no longer than 80 characters.
|===
@@ -31,9 +32,10 @@ Code readability is subjective, therefore pull-requests SHOULD still be merged,
'''
+[[All-lines-MUST-be-no-longer-than-120-characters]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-lines-MUST-be-no-longer-than-120-characters, Rule>>
| All lines MUST be no longer than 120 characters.
|===
@@ -46,9 +48,10 @@ This is a hard limit and is enforced by the build bot. This check MUST NOT be di
=== Ansible Yaml file extension
'''
+[[All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc]]
[cols="2v,v"]
|===
-| **Rule**
+| <<All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc, Rule>>
| All Ansible Yaml files MUST have a .yml extension (and NOT .YML, .yaml etc).
|===
@@ -59,9 +62,10 @@ Example: `tasks.yml`
=== Ansible CLI Variables
'''
+[[Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli, Rule>>
| Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_
|===
@@ -76,9 +80,10 @@ ansible-playbook -e cli_foo=bar someplays.yml
=== Ansible Global Variables
'''
+[[Global-variables-MUST-have-a-prefix-of-g]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Global-variables-MUST-have-a-prefix-of-g, Rule>>
| Global variables MUST have a prefix of g_
|===
Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc.
@@ -94,9 +99,10 @@ g_environment: someval
Ansible role variables are defined as variables contained in (or passed into) a role.
'''
+[[Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules]]
[cols="2v,v"]
|===
-| **Rule**
+| <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>>
| Role variables MUST have a prefix of at least 3 characters. See below for specific naming rules.
|===
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 326c36f6c..ae275b051 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -12,6 +12,8 @@ import os
import pdb
import re
import json
+import yaml
+from ansible.utils.unicode import to_unicode
class FilterModule(object):
''' Custom ansible filters '''
@@ -412,13 +414,19 @@ class FilterModule(object):
in the following layout:
"c_id": {
- "master": [
- { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1", "subtype": "default" }]
- "node": [
- { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2", "subtype": "infra" },
- { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3", "subtype": "compute" },
+ "master": {
+ "default": [
+ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
+ ]
+ "node": {
+ "infra": [
+ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
+ ],
+ "compute": [
+ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
...
- ]}
+ ]
+ }
'''
def _get_tag_value(tags, key):
@@ -428,33 +436,29 @@ class FilterModule(object):
returns 'value2'
'''
for tag in tags:
- # Skip tag_env-host-type to avoid ambiguity with tag_env
- # Removing env-host-type tag but leaving this here
- if tag[:17] == 'tag_env-host-type':
- continue
if tag[:len(key)+4] == 'tag_' + key:
return tag[len(key)+5:]
raise KeyError(key)
def _add_host(clusters,
- env,
+ clusterid,
host_type,
sub_host_type,
host):
''' Add a new host in the clusters data structure '''
- if env not in clusters:
- clusters[env] = {}
- if host_type not in clusters[env]:
- clusters[env][host_type] = {}
- if sub_host_type not in clusters[env][host_type]:
- clusters[env][host_type][sub_host_type] = []
- clusters[env][host_type][sub_host_type].append(host)
+ if clusterid not in clusters:
+ clusters[clusterid] = {}
+ if host_type not in clusters[clusterid]:
+ clusters[clusterid][host_type] = {}
+ if sub_host_type not in clusters[clusterid][host_type]:
+ clusters[clusterid][host_type][sub_host_type] = []
+ clusters[clusterid][host_type][sub_host_type].append(host)
clusters = {}
for host in data:
try:
_add_host(clusters=clusters,
- env=_get_tag_value(host['group_names'], 'env'),
+ clusterid=_get_tag_value(host['group_names'], 'clusterid'),
host_type=_get_tag_value(host['group_names'], 'host-type'),
sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
host={'name': host['inventory_hostname'],
@@ -474,6 +478,19 @@ class FilterModule(object):
secret = os.urandom(num_bytes)
return secret.encode('base-64').strip()
+ @staticmethod
+ def to_padded_yaml(data, level=0, indent=2, **kw):
+ ''' returns a yaml snippet padded to match the indent level you specify '''
+ if data in [None, ""]:
+ return ""
+
+ try:
+ transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+ padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+ return to_unicode("\n{0}".format(padded))
+ except Exception as my_e:
+ raise errors.AnsibleFilterError('Failed to convert: %s', my_e)
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -493,5 +510,6 @@ class FilterModule(object):
"oo_parse_named_certificates": self.oo_parse_named_certificates,
"oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
"oo_pretty_print_cluster": self.oo_pretty_print_cluster,
- "oo_generate_secret": self.oo_generate_secret
+ "oo_generate_secret": self.oo_generate_secret,
+ "to_padded_yaml": self.to_padded_yaml,
}
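A standalone sketch of the `to_padded_yaml` filter added above: dump the data to YAML, then indent every line so the snippet can be spliced into a template at a given nesting level (the Ansible-specific error handling and unicode conversion are omitted here).

```python
import yaml

def to_padded_yaml(data, level=0, indent=2):
    '''Return a yaml snippet padded to the requested indent level.'''
    if data in [None, ""]:
        return ""
    transformed = yaml.safe_dump(data, indent=indent, default_flow_style=False)
    return "\n" + "\n".join(" " * level * indent + line
                            for line in transformed.splitlines())

print(to_padded_yaml({'kubeletArguments': {'max-pods': ['40']}}, level=1))
#   kubeletArguments:
#     max-pods:
#     - '40'
```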
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 8d7c62ad1..35a881a85 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -463,6 +463,32 @@ class FilterModule(object):
IdentityProviderBase.validate_idp_list(idp_list)
return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
+ @staticmethod
+ def validate_pcs_cluster(data, masters=None):
+ ''' Validates output from "pcs status", ensuring that each master
+ provided is online.
+ Ex: data = ('...',
+ 'PCSD Status:',
+ 'master1.example.com: Online',
+ 'master2.example.com: Online',
+ 'master3.example.com: Online',
+ '...')
+ masters = ['master1.example.com',
+ 'master2.example.com',
+ 'master3.example.com']
+ returns True
+ '''
+ if not issubclass(type(data), basestring):
+ raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
+ if not issubclass(type(masters), list):
+ raise errors.AnsibleFilterError("|failed expects masters is a list")
+ valid = True
+ for master in masters:
+ if "{0}: Online".format(master) not in data:
+ valid = False
+ return valid
+
def filters(self):
''' returns a mapping of filters to methods '''
- return {"translate_idps": self.translate_idps}
+ return {"translate_idps": self.translate_idps,
+ "validate_pcs_cluster": self.validate_pcs_cluster}
diff --git a/git/parent.py b/git/parent.py
new file mode 100755
index 000000000..154a02350
--- /dev/null
+++ b/git/parent.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+'''
+ Script to determine if this commit has also
+ been merged through the stage branch
+'''
+#
+# Usage:
+# parent_check.py <branch> <commit_id>
+#
+#
+import sys
+import subprocess
+
+def run_cli_cmd(cmd, in_stdout=None, in_stderr=None):
+ '''Run a command and return its output'''
+ if not in_stderr:
+ proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
+ else:
+ proc = subprocess.check_output(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False)
+ stdout, stderr = proc.communicate()
+ if proc.returncode != 0:
+ return {"rc": proc.returncode, "error": stderr}
+ else:
+ return {"rc": proc.returncode, "result": stdout}
+
+def main():
+ '''Check to ensure that the commit that is currently
+ being submitted is also in the stage branch.
+
+ if it is, succeed
+ else, fail
+ '''
+ branch = 'prod'
+
+ if sys.argv[1] != branch:
+ sys.exit(0)
+
+ # git co stg
+ results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg'])
+
+ # git pull latest
+ results = run_cli_cmd(['/usr/bin/git', 'pull'])
+
+ # setup on the <prod> branch in git
+ results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod'])
+
+ results = run_cli_cmd(['/usr/bin/git', 'pull'])
+ # merge the passed in commit into my current <branch>
+
+ commit_id = sys.argv[2]
+ results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id])
+
+ # get the differences from stg and <branch>
+ results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod'])
+
+ # exit here with error code if the result coming back is an error
+ if results['rc'] != 0:
+ print results['error']
+ sys.exit(results['rc'])
+
+ count = 0
+ # Each 'result' is a commit
+ # Walk through each commit and see if it is in stg
+ for commit in results['result'].split('\n'):
+
+ # continue if it is already in stg
+ if not commit or commit.startswith('<'):
+ continue
+
+ # remove the first char '>'
+ commit = commit[1:]
+
+ # check if any remote branches contain $commit
+ results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None)
+
+ # if this comes back empty, nothing contains it, we can skip it as
+ # we have probably created the merge commit here locally
+ if results['rc'] == 0 and len(results['result']) == 0:
+ continue
+
+ # The results generally contain origin/pr/246/merge and origin/pr/246/head
+ # this is the pull request which would contain the commit in question.
+ #
+ # If the results do not contain origin/stg then stage does not contain
+ # the commit in question. Therefore we need to alert!
+ if 'origin/stg' not in results['result']:
+ print "\nFAILED: (These commits are not in stage.)\n"
+ print "\t%s" % commit
+ count += 1
+
+ # Exit with count of commits in #{branch} but not stg
+ sys.exit(count)
+
+if __name__ == '__main__':
+ main()
+
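
The parsing convention parent.py relies on, sketched in isolation: "git rev-list --left-right stg...prod" prefixes commits reachable only from the left ref (stg) with '<' and commits reachable only from the right ref (prod) with '>'. The hashes below are made up for illustration:

    # Hypothetical rev-list output: one '<'/'>'-prefixed commit per line.
    rev_list_output = "<aaa111\n>bbb222\n>ccc333\n"

    # Keep only commits unique to prod (the '>' side), stripping the marker,
    # exactly as the loop in parent.py does.
    only_in_prod = [line[1:] for line in rev_list_output.split('\n')
                    if line and not line.startswith('<')]
    print(only_in_prod)  # ['bbb222', 'ccc333']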
diff --git a/git/parent.rb b/git/parent.rb
deleted file mode 100755
index 2acb127c4..000000000
--- a/git/parent.rb
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env ruby
-#
-#
-#
-
-if __FILE__ == $0
- # If we aren't on master we don't need to parent check
- branch = 'prod'
- exit(0) if ARGV[0] !~ /#{branch}/
- commit_id = ARGV[1]
- %x[/usr/bin/git checkout #{branch}]
- %x[/usr/bin/git merge #{commit_id}]
-
- count = 0
- #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
- lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
- lines.each do |commit|
- # next if they are in stage
- next if commit =~ /^</
- # remove the first char '>'
- commit = commit[1..-1]
- # check if any remote branches contain $commit
- results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
- # if this comes back empty, nothing contains it, we can skip it as
- # we have probably created the merge commit here locally
- next if results.empty?
-
- # The results generally contain origin/pr/246/merge and origin/pr/246/head
- # this is the pull request which would contain the commit in question.
- #
- # If the results do not contain origin/stg then stage does not contain
- # the commit in question. Therefore we need to alert!
- unless results =~ /origin\/stg/
- puts "\nFAILED: (These commits are not in stage.)\n"
- puts "\t#{commit}"
- count += 1
- end
- end
-
- # Exit with count of commits in #{branch} but not stg
- exit(count)
-end
-
-__END__
-
diff --git a/git/yaml_validation.py b/git/yaml_validation.py
new file mode 100755
index 000000000..69fd455a5
--- /dev/null
+++ b/git/yaml_validation.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# python yaml validator for a git commit
+#
+'''
+python yaml validator for a git commit
+'''
+import shutil
+import sys
+import os
+import tempfile
+import subprocess
+import yaml
+
+def get_changes(oldrev, newrev, tempdir):
+ '''Get a list of git changes from oldrev to newrev'''
+ proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev,
+ newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE)
+ stdout, _ = proc.communicate()
+ files = stdout.split('\n')
+
+ # No file changes
+ if not files:
+ return []
+
+ cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir)
+ proc = subprocess.Popen(cmd, shell=True)
+ _, _ = proc.communicate()
+
+ rfiles = []
+ for dirpath, _, fnames in os.walk(tempdir):
+ for fname in fnames:
+ rfiles.append(os.path.join(dirpath, fname))
+
+ return rfiles
+
+def main():
+ '''
+ Perform yaml validation
+ '''
+ results = []
+ try:
+ tmpdir = tempfile.mkdtemp(prefix='jenkins-git-')
+ old, new, _ = sys.argv[1:]
+
+ for file_mod in get_changes(old, new, tmpdir):
+
+ print "+++++++ Received: %s" % file_mod
+
+ # if the file extensions is not yml or yaml, move along.
+ if not file_mod.endswith('.yml') and not file_mod.endswith('.yaml'):
+ continue
+
+ # We use symlinks in our repositories, ignore them.
+ if os.path.islink(file_mod):
+ continue
+
+ try:
+ yaml.load(open(file_mod))
+ results.append(True)
+
+ except yaml.scanner.ScannerError as yerr:
+ print yerr
+ results.append(False)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ if not all(results):
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
+
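
For reference, the failure mode the validator records: a scanner error from malformed YAML is printed and counted as a failed result rather than aborting the whole run. A minimal reproduction (using safe_load here; the hook itself calls yaml.load):

    import yaml

    try:
        yaml.safe_load("foo: bar: baz")  # malformed: nested mapping value
    except yaml.scanner.ScannerError as yerr:
        # yaml_validation.py prints the error and appends False to results.
        print(yerr)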
diff --git a/git/yaml_validation.rb b/git/yaml_validation.rb
deleted file mode 100755
index f5ded7a78..000000000
--- a/git/yaml_validation.rb
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env ruby
-#
-#
-#
-require 'yaml'
-require 'tmpdir'
-
-class YamlValidate
- def self.yaml_file?(filename)
- return filename.end_with?('.yaml') || filename.end_with?('.yml')
- end
-
- def self.short_yaml_ext?(filename)
- return filename.end_with?(".yml")
- end
-
- def self.valid_yaml?(filename)
- YAML::load_file(filename)
-
- return true
- end
-end
-
-class GitCommit
- attr_accessor :oldrev, :newrev, :refname, :tmp
- def initialize(oldrev, newrev, refname)
- @oldrev = oldrev
- @newrev = newrev
- @refname = refname
- @tmp = Dir.mktmpdir(@newrev)
- end
-
- def get_file_changes()
- files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n")
-
- # if files is empty we will get a full checkout. This happens on
- # a git rm file. If there are no changes then we need to skip the archive
- return [] if files.empty?
-
- # We only want to take the files that changed. Archive will do that when passed
- # the filenames. It will export these to a tmp dir
- system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}")
- return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) }
- end
-end
-
-if __FILE__ == $0
- while data = STDIN.gets
- oldrev, newrev, refname = data.split
- gc = GitCommit.new(oldrev, newrev, refname)
-
- results = []
- gc.get_file_changes().each do |file|
- begin
- puts "++++++ Received: #{file}"
-
- #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file
-
- # skip readme, other files, etc
- next unless YamlValidate.yaml_file?(file)
-
- results << YamlValidate.valid_yaml?(file)
- rescue Exception => ex
- puts "\n#{ex.message}\n\n"
- results << false
- end
- end
-
- #puts "RESULTS\n#{results.inspect}\n"
- exit 1 if results.include?(false)
- end
-end
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
index 1f503b8cf..aa0f9090f 100644
--- a/inventory/aws/hosts/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
@@ -45,10 +45,10 @@ vpc_destination_variable = ip_address
route53 = False
# To exclude RDS instances from the inventory, uncomment and set to False.
-#rds = False
+rds = False
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
-#elasticache = False
+elasticache = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
index a92b8e0fc..05aef586f 100644
--- a/inventory/byo/hosts.aep.example
+++ b/inventory/byo/hosts.aep.example
@@ -30,6 +30,26 @@ deployment_type=atomic-enterprise
# Enable cluster metrics
#use_cluster_metrics=true
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure the oauth template in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#customizing-the-login-page
+#openshift_master_oauth_template=/path/to/login-template.html
+
# Configure metricsPublicURL in the master config for cluster metrics
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
@@ -117,6 +137,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default project node selector
#osm_default_node_selector='region=primary'
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
@@ -183,6 +206,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure node kubelet arguments
#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
# host group for masters
[masters]
aep3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index c8a9918ac..7b240622d 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -31,6 +31,26 @@ deployment_type=origin
# Enable cluster metrics
#use_cluster_metrics=true
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure the oauth template in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#customizing-the-login-page
+#openshift_master_oauth_template=/path/to/login-template.html
+
# Configure metricsPublicURL in the master config for cluster metrics
# See: https://docs.openshift.org/latest/install_config/cluster_metrics.html
#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
@@ -122,6 +142,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default project node selector
#osm_default_node_selector='region=primary'
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
@@ -193,6 +216,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure node kubelet arguments
#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 2619c2416..e44d1abc9 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -30,6 +30,26 @@ deployment_type=openshift-enterprise
# Enable cluster metrics
#use_cluster_metrics=true
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure the oauth template in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#customizing-the-login-page
+#openshift_master_oauth_template=/path/to/login-template.html
+
# Configure metricsPublicURL in the master config for cluster metrics
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
@@ -117,6 +137,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default project node selector
#osm_default_node_selector='region=primary'
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
@@ -183,6 +206,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure node kubelet arguments
#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 7c260ff21..1a1445835 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.26
+Version: 3.0.36
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -259,6 +259,132 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Jan 25 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.36-1
+- Fixing awsutil to support aliases and v3 (kwoodson@redhat.com)
+- Fail when master restart playbook finds no active masters rather than any
+ failed masters. (abutcher@redhat.com)
+- Skipping any symlinks for the yaml validation check (kwoodson@redhat.com)
+- Added template for config loop. (twiest@redhat.com)
+- Test validate_pcs_cluster input is basestring instead of str.
+ (abutcher@redhat.com)
+- Fix error when oo_masters_to_config is empty (jdetiber@redhat.com)
+- Update inventory examples for console customization (spinolacastro@gmail.com)
+- Expose console config for customization (spinolacastro@gmail.com)
+- oso_host_monitoring: added environment as a var to the host monitoring
+ systemd script (mwoodson@redhat.com)
+- Check master certificates during upgrade. (abutcher@redhat.com)
+- Use haproxy frontend port for os_firewall. (abutcher@redhat.com)
+- Fix native master api sysconfig. (abutcher@redhat.com)
+- Enable kubernetes master config of podEvictionTimeout from ansible
+ (jstuever@redhat.com)
+- Fix wrapper pathing for non-root user install. (abutcher@redhat.com)
+- Remove camel case for bin/cluster addNodes (jdetiber@redhat.com)
+- Update cluster_hosts.yml for cloud providers (jdetiber@redhat.com)
+- Removing ruby scripts and replacing with python. (kwoodson@redhat.com)
+- Fixed a logic bug and yaml load (kwoodson@redhat.com)
+- Fixing yaml validation in python. Inputs behave differently as does glob
+ (kwoodson@redhat.com)
+- oso_monitoring: add the zabbix libs (mwoodson@redhat.com)
+- Removing removing scripts and moving to python. (kwoodson@redhat.com)
+- add ability to disable ztriggers and disable new container dns check
+ (jdiaz@redhat.com)
+- Remove default disable of SDN for GCE (jdetiber@redhat.com)
+- Fix hardcoded api_port in openshift_master_cluster (jdetiber@redhat.com)
+- Use local address for loopback kubeconfig (jdetiber@redhat.com)
+- consolidate steps and cleanup template dir (jdetiber@redhat.com)
+- v3_0_to_v3_1_upgrade: Remove is_atomic check for upgrades
+ (smunilla@redhat.com)
+- v3_0_to_v3_1_upgrade: Copy tasks rather than including from the playbook
+ (smunilla@redhat.com)
+- v3_0_to_v3_1_upgrade: Install storage packages (smunilla@redhat.com)
+- Controllers_port and firewall rules (spinolacastro@gmail.com)
+- Fix bind address/port when isn't default (spinolacastro@gmail.com)
+- Add ability to disable os_firewall (jdetiber@redhat.com)
+
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.35-1
+- added the lib_timedate role (mwoodson@redhat.com)
+- added chrony (mwoodson@redhat.com)
+- added oso_moniotoring tools role (mwoodson@redhat.com)
+- Improve pacemaker 'is-active' check. (abutcher@redhat.com)
+
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.34-1
+- clean up too-many-branches / logic (jdiaz@redhat.com)
+- atomic-openshift-installer: add containerized to inventory
+ (smunilla@redhat.com)
+- Add 'unknown' to possible output for the is-active check.
+ (abutcher@redhat.com)
+- Fix cluster_method conditional in master restart playbook.
+ (abutcher@redhat.com)
+- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com)
+- atomic-openshift-installer: Remove containerized install for 3.0
+ (smunilla@redhat.com)
+- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com)
+- Remove pause after haproxy start (abutcher@redhat.com)
+- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com)
+
+* Fri Jan 15 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.33-1
+- Configure nodes which are also masters prior to nodes in containerized
+ install. (abutcher@redhat.com)
+- Call attention to openshift_master_rolling_restart_mode variable in restart
+ prompt. (abutcher@redhat.com)
+- Added anchors for rules in style_guide.adoc in order to make it easier to
+ reference specific rules in PRs. (twiest@redhat.com)
+- Update ec2.ini (jdetiber@redhat.com)
+
+* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.32-1
+- Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com)
+
+* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.31-1
+- Check api prior to starting node. (abutcher@redhat.com)
+- added anchors (twiest@redhat.com)
+
+* Wed Jan 13 2016 Joel Diaz <jdiaz@redhat.com> 3.0.30-1
+- Add -A and detail --v3 flags
+
+* Wed Jan 13 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.29-1
+- 3.1.1 upgrade playbook (bleanhar@redhat.com)
+- Updated help menu for v3 flag (kwoodson@redhat.com)
+- Add wait in between api and controllers start for native ha.
+ (abutcher@redhat.com)
+- atomic-openshift-installer: Error handling for unicode hostnames
+ (smunilla@redhat.com)
+- Update api verification. (abutcher@redhat.com)
+- Add a Verify API Server handler that waits for the API server to become
+ available (sdodson@redhat.com)
+- Add -A parameter to forward ssh agent (jdiaz@redhat.com)
+- Validate pacemaker cluster members. (abutcher@redhat.com)
+- Removed atomic host check (kwoodson@redhat.com)
+- Add is_containerized inputs to nosetests. (abutcher@redhat.com)
+- Add wait for API before starting controllers w/ native ha install.
+ (abutcher@redhat.com)
+- Fix for to_padded_yaml filter (jdetiber@redhat.com)
+- - sqashed to one commit (llange@redhat.com)
+- Switch to using hostnamectl as it works on atomic and rhel7
+ (sdodson@redhat.com)
+- Update rolling restart playbook for pacemaker support. Replace fail with a
+ warn and prompt if running ansible from a host that will be rebooted. Re-
+ organize playbooks. (abutcher@redhat.com)
+- Implement simple master rolling restarts. (dgoodwin@redhat.com)
+- re-enable containerize installs (sdodson@redhat.com)
+- Set portal net in master playbook (jdetiber@redhat.com)
+- Set the cli image to match osm_image in openshift_cli role
+ (sdodson@redhat.com)
+- atomic-openshift-installer: Populate new_nodes group (smunilla@redhat.com)
+- Always pull docker images (sdodson@redhat.com)
+
+* Mon Jan 11 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.28-1
+- added the rhe7-host-monitoring service file (mwoodson@redhat.com)
+- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
+- Removing some internal hostnames (bleanhar@redhat.com)
+- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
+- Make bin/cluster able to spawn OSE 3.1 clusters (lhuard@amadeus.com)
+- oso_host_monitoring role: removed the f22 and zagg client, replaced it with
+ oso-rhel7-host-monitoring container (mwoodson@redhat.com)
+
+* Fri Jan 08 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.27-1
+- Update to metadata tooling. (kwoodson@redhat.com)
+- Fix VM drive cleanup during terminate on libvirt (lhuard@amadeus.com)
+
* Fri Jan 08 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.26-1
- Bug 1296388 - fixing typo (bleanhar@redhat.com)
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index de9f36c8a..0df77e309 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,4 +1,4 @@
-- hosts: OSv3
+- hosts: OSEv3
gather_facts: false
tasks:
- name: install python and deps for ansible modules
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 174cea460..d24e9cafa 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -20,7 +20,7 @@
# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
#
# Notes:
-# * By default this will do a 55GB GP2 volume. The can be overidden with the "-e 'cli_volume_size=100'" variable
+# * By default this will do a 200GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
# * This does a GP2 by default. Support for Provisioned IOPS has not been added
# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
# * This can be done with NO downtime on the host
@@ -36,7 +36,7 @@
vars:
cli_volume_type: gp2
- cli_volume_size: 55
+ cli_volume_size: 200
# cli_volume_iops: "{{ 30 * cli_volume_size }}"
pre_tasks:
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index acfa89515..10454ad11 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -9,12 +9,15 @@ storage:
s3:
accesskey: {{ aws_access_key }}
secretkey: {{ aws_secret_key }}
- region: us-east-1
- bucket: {{ clusterid }}-docker
+ region: {{ aws_bucket_region }}
+ bucket: {{ aws_bucket_name }}
encrypt: true
secure: true
v4auth: true
rootdirectory: /registry
+auth:
+ openshift:
+ realm: openshift
middleware:
repository:
- name: openshift
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index d409b4086..0814efae2 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,7 +1,7 @@
---
# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
# Usage:
-# ansible-playbook s3_registry.yml -e clusterid="mycluster"
+# ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="clusterid-docker" -e aws_region="us-east-1"
#
# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
# The 'clusterid' is the short name of your cluster.
@@ -13,6 +13,8 @@
vars:
aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+ aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
+ aws_bucket_region: "{{ aws_region | default(lookup('env', 'S3_REGION') | default('us-east-1', true), true) }}"
tasks:
@@ -29,7 +31,7 @@
- name: Create S3 bucket
local_action:
- module: s3 bucket="{{ clusterid }}-docker" mode=create
+ module: s3 bucket="{{ aws_bucket_name }}" mode=create
- name: Set up registry environment variable
command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
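
The intended precedence for the new registry variables, sketched in plain Python (names mirror the playbook; the S3_REGION fallback matches the env lookup above):

    import os

    def registry_s3_settings(clusterid, aws_bucket=None, aws_region=None):
        # -e aws_bucket=... wins, else '<clusterid>-docker'.
        bucket = aws_bucket or "{0}-docker".format(clusterid)
        # -e aws_region=... wins, else $S3_REGION, else us-east-1.
        region = aws_region or os.environ.get("S3_REGION") or "us-east-1"
        return bucket, region

    print(registry_s3_settings("mycluster"))  # ('mycluster-docker', 'us-east-1')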
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ac20f5f9b..36d686c8b 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -202,6 +202,10 @@
- /usr/lib/systemd/system/atomic-openshift-master-controllers.service
- /usr/lib/systemd/system/origin-master-api.service
- /usr/lib/systemd/system/origin-master-controllers.service
+ - /usr/local/bin/openshift
+ - /usr/local/bin/oadm
+ - /usr/local/bin/oc
+ - /usr/local/bin/kubectl
# Since we are potentially removing the systemd unit files for the separated
# master-api and master-controllers services, we need to reload the
diff --git a/playbooks/aws/openshift-cluster/addNodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
index 3d88e6b23..3d88e6b23 100644
--- a/playbooks/aws/openshift-cluster/addNodes.yml
+++ b/playbooks/aws/openshift-cluster/add_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index d6b413c6f..1023f3ec1 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -1,22 +1,17 @@
---
-g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-g_master_hosts: "{{ (groups['tag_host-type_master']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-g_node_hosts: "{{ (groups['tag_host-type_node']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-g_nfs_hosts: "{{ (groups['tag_host-type_nfs']|default([]))
- | intersect((groups['tag_environment_' ~ cluster_id]|default([]))) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
-g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | default([]) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
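
The refactor above computes one base set and derives every host group from it. In set terms (a rough Python analogue; Jinja's intersect preserves order, unlike Python sets):

    clusterid_hosts   = {"master1", "node1", "node2", "etcd1"}
    environment_hosts = {"master1", "node1", "node2", "etcd1", "other"}
    node_tag_hosts    = {"node1", "node2"}

    g_all_hosts  = clusterid_hosts & environment_hosts   # clusterid ∩ environment
    g_node_hosts = g_all_hosts & node_tag_hosts          # ∩ tag_host-type_node
    print(sorted(g_node_hosts))  # ['node1', 'node2']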
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index c8ee9bad4..ae12286bd 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1,5 +1,19 @@
---
debug_level: 2
+
+deployment_rhel7_ent_base:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ image_name:
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
+ keypair: libra
+ type: m4.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+
deployment_vars:
origin:
# centos-7, requires marketplace
@@ -25,15 +39,6 @@ deployment_vars:
security_groups: [ 'public' ]
vpc_subnet:
assign_public_ip:
- enterprise:
- # rhel-7.1, requires cloud access subscription
- image: ami-10663b78
- image_name:
- region: us-east-1
- ssh_user: ec2-user
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
new file mode 100644
index 000000000..b230835c3
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
@@ -0,0 +1,17 @@
+# v3.1 minor upgrade playbook
+This upgrade will preserve all locally made configuration modifications to the
+Masters and Nodes.
+
+## Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrades and restarts master services
+ * Upgrades and restarts node services
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+    ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..20fa9b10f
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,14 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/lookup_plugins b/playbooks/byo/openshift-master/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-master/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
new file mode 100644
index 000000000..a78a6aa3d
--- /dev/null
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -0,0 +1,4 @@
+---
+- include: ../../common/openshift-master/restart.yml
+ vars_files:
+ - ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 9a303c62d..88736ee03 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -4,7 +4,7 @@
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
- when: deployment_type == "enterprise" and
+ when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
index b46407ed7..3a1a8ebb1 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -1,8 +1,8 @@
#!/bin/bash
-yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | tr '\n' ' ')
+yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | tr '\n' ' ')
+yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
echo "---"
echo "curr_version: ${yum_installed}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 68df2153d..8ec379109 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -248,7 +248,31 @@
config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
- set_fact:
- master_certs_missing: True
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+ - set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
+
+ - name: Check status of master certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
+ with_items: openshift_master_certs
+ register: g_master_cert_stat_result
+
+ - set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -262,8 +286,8 @@
| oo_flatten | unique }}"
master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups.oo_masters_to_config)
- | difference([groups.oo_first_master.0]) }}"
+ | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
+ | oo_filter_list(filter_attr='master_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_deployment_type: "{{ deployment_type }}"
roles:
@@ -398,6 +422,24 @@
- name: Ensure node service enabled
service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+ - name: Install Ceph storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
+
+ - name: Install GlusterFS storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
+
+ - name: Set sebooleans to allow gluster storage plugin access from containers
+ seboolean:
+ name: "{{ item }}"
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
+ register: sebool_result
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
+
- set_fact:
node_update_complete: True
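
The master_certs_missing expression above collapses the per-file stat results into a single boolean; a rough equivalent of the oo_collect step, with made-up stat data:

    # Hypothetical results from the 'Check status of master certificates' task.
    g_master_cert_stat_result = {"results": [
        {"stat": {"exists": True}},   # e.g. admin.crt present
        {"stat": {"exists": False}},  # e.g. master.server.crt missing
    ]}

    exists_list = [r["stat"]["exists"]
                   for r in g_master_cert_stat_result["results"]]
    master_certs_missing = False in exists_list
    print(master_certs_missing)  # True -> certificates must be regenerated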
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
new file mode 100644
index 000000000..d8336fcae
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -0,0 +1,50 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.1.z, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
new file mode 100644
index 000000000..91780de09
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -0,0 +1,87 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+ - name: Determine available versions
+ script: ../files/versions.sh {{ g_new_service_name }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
new file mode 100644
index 000000000..81dbba1e3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -0,0 +1,137 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master packages and configuration
+ hosts: oo_masters_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+
+ - name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+# Currently 3.1.1 does not have any new configuration settings
+#
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.0'
+# to_version: '3.1'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+
+ - name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+ hosts: oo_masters_to_config
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
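
Each gate play follows the same pattern: collect the hosts that set their completion fact, diff against the full group, and fail while listing the stragglers. A rough Python analogue with hypothetical hostvars:

    oo_masters_to_config = ["master1", "master2", "master3"]
    hostvars = {
        "master1": {"reconcile_complete": True},
        "master2": {"reconcile_complete": True},
        "master3": {},  # never set the fact: its play failed earlier
    }

    completed = [h for h in oo_masters_to_config
                 if hostvars[h].get("reconcile_complete")]
    failed = [h for h in oo_masters_to_config if h not in completed]
    if failed:
        raise SystemExit("Upgrade cannot continue. Hosts: " + ",".join(failed))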
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 9a5ae0e6b..d23a54511 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -89,7 +89,6 @@
roles:
- etcd
- role: nickhammond.logrotate
- when: not openshift.common.is_containerized | bool
- name: Delete temporary directory on localhost
hosts: localhost
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 677c274c4..12497bf94 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -43,6 +43,7 @@
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
@@ -51,6 +52,7 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
- name: Check status of external etcd certificatees
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -84,6 +86,7 @@
etcd_generated_certs_dir: /etc/etcd/generated_certs
etcd_needing_client_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
+ | default([])
| oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
@@ -217,6 +220,7 @@
hosts: oo_lb_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
haproxy_frontends:
- name: atomic-openshift-api
mode: tcp
@@ -312,6 +316,7 @@
- name: Configure master instances
hosts: oo_masters_to_config
+ any_errors_fatal: true
serial: 1
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
@@ -333,9 +338,10 @@
roles:
- openshift_master
- role: nickhammond.logrotate
- when: not openshift.common.is_containerized | bool
- role: fluentd_master
when: openshift.common.use_fluentd | bool
+ - role: nuage_master
+ when: openshift.common.use_nuage | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -362,7 +368,7 @@
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
roles:
- role: cockpit
- when: not openshift.common.is_containerized and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
- name: Configure flannel
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
new file mode 100644
index 000000000..02449e40d
--- /dev/null
+++ b/playbooks/common/openshift-master/restart.yml
@@ -0,0 +1,151 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Validate configuration for rolling restart
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - fail:
+ msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+ when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# Creating a temp file on localhost, we then check each system that will
+# be rebooted to see if that file exists, if so we know we're running
+# ansible on a machine that needs a reboot, and we need to error out.
+- name: Create temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - local_action: command mktemp
+ register: mktemp
+ changed_when: false
+
+- name: Check if temp file exists on any masters
+ hosts: oo_masters_to_config
+ tasks:
+ - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+ register: exists
+ changed_when: false
+
+- name: Cleanup temp file on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+ changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+ hosts: oo_masters_to_config
+ tasks:
+ - pause:
+ prompt: >
+ Warning: Running playbook from a host that will be restarted!
+ Press CTRL+C and A to abort playbook execution. You may
+ continue by pressing ENTER but the playbook will stop
+ executing after this system has been restarted and services
+        must be verified manually. To only restart services, set
+        openshift_rolling_restart_mode=services in host
+        inventory and relaunch the playbook.
+ when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+ - set_fact:
+ current_host: "{{ exists.stat.exists }}"
+ when: openshift.common.rolling_restart_mode == 'system'
+
+- name: Determine which masters are currently active
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ tasks:
+ - name: Check master service status
+ command: >
+ systemctl is-active {{ openshift.common.service_type }}-master
+ register: active_check_output
+ when: openshift.master.cluster_method | default(None) == 'pacemaker'
+ failed_when: false
+ changed_when: false
+ - set_fact:
+ is_active: "{{ active_check_output.stdout == 'active' }}"
+ when: openshift.master.cluster_method | default(None) == 'pacemaker'
+
+- name: Evaluate master groups
+ hosts: localhost
+ become: no
+ tasks:
+ - fail:
+ msg: >
+      Did not receive active status from any masters. Please verify the pacemaker cluster.
+ when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('is_active')
+ | list) }}"
+ - name: Evaluate oo_active_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_active_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['is_active'] | default(false)) | bool
+ - name: Evaluate oo_current_masters
+ add_host:
+ name: "{{ item }}"
+ groups: oo_current_masters
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+ when: (hostvars[item]['current_host'] | default(false)) | bool
+
+- name: Validate pacemaker cluster
+ hosts: oo_active_masters
+ tasks:
+ - name: Retrieve pcs status
+ command: pcs status
+ register: pcs_status_output
+ changed_when: false
+ - fail:
+ msg: >
+ Pacemaker cluster validation failed. One or more nodes are not online.
+ when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
+
+- name: Restart masters
+ hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart active masters
+ hosts: oo_active_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services_pacemaker.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart current masters
+ hosts: oo_current_masters
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
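+
+# Usage sketch (illustrative, not part of the play logic above): choose the
+# restart mode per inventory, e.g. in group_vars for the masters group:
+#
+#   openshift_rolling_restart_mode: system    # reboot each master in turn
+#   openshift_rolling_restart_mode: services  # restart services only (default)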
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
new file mode 100644
index 000000000..ff206f5a2
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -0,0 +1,39 @@
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port=22
+ when: openshift.master.cluster_method == 'pacemaker'
+- name: Wait for master to become available
+ command: pcs status
+ register: pcs_status_output
+ until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
+ retries: 15
+ delay: 2
+ changed_when: false
+ when: openshift.master.cluster_method == 'pacemaker'
+- fail:
+ msg: >
+      Pacemaker cluster validation failed: {{ inventory_hostname }} is not online.
+ when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
new file mode 100644
index 000000000..c9219e8de
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
@@ -0,0 +1,25 @@
+- name: Fail over master resource
+ command: >
+ pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+- name: Restart master system
+ # https://github.com/ansible/ansible/issues/10616
+ shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+ async: 1
+ poll: 0
+ ignore_errors: true
+ become: yes
+- name: Wait for master to start
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+    delay=10
+    port=22
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
new file mode 100644
index 000000000..5e539cd65
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -0,0 +1,27 @@
+- name: Restart master
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: restarted
+ when: not openshift_master_ha | bool
+- name: Restart master API
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: restarted
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: restarted
+  # Ignore errors since it is possible that type != simple for
+ # pre-3.1.1 installations.
+ ignore_errors: true
+ when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
new file mode 100644
index 000000000..e738f3fb6
--- /dev/null
+++ b/playbooks/common/openshift-master/restart_services_pacemaker.yml
@@ -0,0 +1,10 @@
+- name: Restart master services
+ command: pcs resource restart master
+- name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ openshift.master.cluster_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 483a7768c..81ec9ab6d 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -154,21 +154,15 @@
validate_checksum: yes
with_items: nodes_needing_certs
-- name: Configure node instances
+- name: Deploy node certificates
hosts: oo_nodes_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
- # TODO: Prefix flannel role variables.
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
- pre_tasks:
+ tasks:
- name: Ensure certificate directory exists
file:
path: "{{ node_cert_dir }}"
state: directory
-
# TODO: notify restart node
# possibly test service started time against certificate/config file
# timestamps in node to trigger notify
@@ -177,12 +171,50 @@
src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
dest: "{{ node_cert_dir }}"
when: certs_missing
+
+- name: Evaluate node groups
+ hosts: localhost
+ become: no
+ tasks:
+ - name: Evaluate oo_containerized_master_nodes
+ add_host:
+ name: "{{ item }}"
+ groups: oo_containerized_master_nodes
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+ when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+
+- name: Configure node instances
+ hosts: oo_containerized_master_nodes
+ serial: 1
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
roles:
- openshift_node
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ vars:
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ roles:
+ - openshift_node
+
+- name: Additional node config
+ hosts: oo_nodes_to_config
+ vars:
+ # TODO: Prefix flannel role variables.
+ openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ roles:
- role: flannel
when: openshift.common.use_flannel | bool
+ - role: nuage_node
+ when: openshift.common.use_nuage | bool
- role: nickhammond.logrotate
- when: not openshift.common.is_containerized | bool
- role: fluentd_node
when: openshift.common.use_fluentd | bool
tasks:
@@ -215,6 +247,19 @@
| oo_collect('openshift.common.hostname') }}"
openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
-
+  # Necessary because, when a node is also a master, the master is restarted
+  # after the node restarts docker, and systemd can take up to 60 seconds to
+  # start the master again.
+ - name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
roles:
- openshift_manage_node
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index 2bfcedfc9..15690e3bf 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -1,22 +1,17 @@
---
-g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-g_lb_hosts: "{{ (groups['tag_host-type-lb']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-g_master_hosts: "{{ (groups['tag_host-type-master']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-g_node_hosts: "{{ (groups['tag_host-type-node']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-g_nfs_hosts: "{{ (groups['tag_host-type-nfs']|default([]))
- | intersect((groups['tag_environment-' ~ cluster_id]|default([]))) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | default([]) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 3231ecc8e..84a3f84d4 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -13,4 +13,3 @@
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
- openshift_use_openshift_sdn: "{{ do_we_use_openshift_sdn }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
index acf5e5110..75343dffa 100644
--- a/playbooks/gce/openshift-cluster/join_node.yml
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -48,6 +48,4 @@
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ansible_default_ipv4.address }}"
- openshift_use_openshift_sdn: true
openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
- os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index bdb39923e..f004a9e6b 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,12 @@
---
-do_we_use_openshift_sdn: true
-sdn_network_plugin: redhat/openshift-ovs-subnet
debug_level: 2
-# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
+
+deployment_rhel7_ent_base:
+ image: rhel-7
+ machine_type: n1-standard-1
+ ssh_user:
+ sudo: yes
+
deployment_vars:
origin:
image: preinstalled-slave-50g-v5
@@ -14,8 +18,6 @@ deployment_vars:
machine_type: n1-standard-1
ssh_user: root
sudo: no
- enterprise:
- image: rhel-7
- machine_type: n1-standard-1
- ssh_user:
- sudo: yes
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index 198a3e4e2..15690e3bf 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -1,22 +1,17 @@
---
-g_etcd_hosts: "{{ (groups['tag_host-type-etcd']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-g_lb_hosts: "{{ (groups['tag_host-type-lb']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-g_master_hosts: "{{ (groups['tag_host-type-master']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-g_node_hosts: "{{ (groups['tag_host-type-node']|default([]))
- | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-g_nfs_hosts: "{{ (groups['tag_host-type-node']|default([]))
- | intersect((groups['tag_environment-' ~ cluster_id]|default([]))) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
-g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | default([]) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 8b170f99e..da628786b 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -5,6 +5,19 @@ libvirt_network: openshift-ansible
libvirt_uri: 'qemu:///system'
debug_level: 2
+# Automatic download of the qcow2 image for RHEL cannot be done directly from the Red Hat portal because it requires authentication.
+# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
+deployment_rhel7_ent_base:
+ image:
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ sha256: "{{ lookup('oo_option', 'image_sha256') |
+ default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
+ ssh_user: openshift
+ sudo: yes
+
deployment_vars:
origin:
image:
@@ -25,18 +38,6 @@ deployment_vars:
sha256:
ssh_user: root
sudo: no
- enterprise:
- image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
- sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}"
- ssh_user: openshift
- sudo: yes
-# origin:
-# fedora:
-# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
-# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index bc586d983..1023f3ec1 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -1,22 +1,17 @@
---
-g_etcd_hosts: "{{ (groups['tag_host-type_etcd']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-g_lb_hosts: "{{ (groups['tag_host-type_lb']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-g_master_hosts: "{{ (groups['tag_host-type_master']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-g_node_hosts: "{{ (groups['tag_host-type_node']|default([]))
- | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
- | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-g_nfs_hosts: "{{ (groups['tag_host-type_nfs']|default([]))
- | intersect((groups['tag_environment_' ~ cluster_id]|default([]))) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
-g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | default([]) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
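+
+# Illustration of the intersection logic above with hypothetical tag groups:
+#   tag_clusterid_prod    = [master1, node1, lb1]
+#   tag_environment_prod  = [master1, node1, lb1, stray1]
+#   tag_host-type_master  = [master1]
+# yields g_all_hosts = [master1, node1, lb1] and g_master_hosts = [master1].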
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index f8d15999e..76cde1706 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -20,6 +20,11 @@ openstack_flavor:
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
+deployment_rhel7_ent_base:
+ image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
+ ssh_user: openshift
+ sudo: yes
+
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
@@ -29,7 +34,6 @@ deployment_vars:
image:
ssh_user: root
sudo: no
- enterprise:
- image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}"
- ssh_user: openshift
- sudo: yes
+ enterprise: "{{ deployment_rhel7_ent_base }}"
+ openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+ atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/roles/chrony/README.md b/roles/chrony/README.md
new file mode 100644
index 000000000..bf15d9669
--- /dev/null
+++ b/roles/chrony/README.md
@@ -0,0 +1,31 @@
+chrony
+=========
+
+A role to configure chrony as the NTP client
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+chrony_ntp_servers: a list of NTP servers to use in the chrony.conf file
+
+Dependencies
+------------
+
+roles/lib_timedatectl
+
+Example Playbook
+----------------
+
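+A minimal sketch of applying this role (host pattern and server list are
+illustrative):
+
+    - hosts: all
+      roles:
+      - role: chrony
+        chrony_ntp_servers:
+        - 0.pool.ntp.org
+        - 1.pool.ntp.org
+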
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml
new file mode 100644
index 000000000..95576e666
--- /dev/null
+++ b/roles/chrony/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for chrony
diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml
new file mode 100644
index 000000000..1973c79e2
--- /dev/null
+++ b/roles/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart chronyd
+ service:
+ name: chronyd
+ state: restarted
diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml
new file mode 100644
index 000000000..85595d7c3
--- /dev/null
+++ b/roles/chrony/meta/main.yml
@@ -0,0 +1,18 @@
+---
+galaxy_info:
+ author: Openshift Operations
+  description: Configure chrony as an NTP client
+ company: Red Hat
+ license: Apache 2.0
+ min_ansible_version: 1.9.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - system
+dependencies:
+- roles/lib_timedatectl
diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml
new file mode 100644
index 000000000..fae6d8e4c
--- /dev/null
+++ b/roles/chrony/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: Remove the ntp package
+ yum:
+ name: ntp
+ state: absent
+
+- name: Ensure the chrony package is installed
+ yum:
+ name: chrony
+ state: installed
+
+- name: Install /etc/chrony.conf
+ template:
+ src: chrony.conf.j2
+ dest: /etc/chrony.conf
+ owner: root
+ group: root
+ mode: 0644
+ notify:
+ - Restart chronyd
+
+- name: Enable NTP via timedatectl
+ timedatectl:
+ ntp: True
+
+- name: Ensure chronyd is started and enabled
+ service:
+ name: chronyd
+ state: started
+ enabled: yes
diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2
new file mode 100644
index 000000000..de43b6364
--- /dev/null
+++ b/roles/chrony/templates/chrony.conf.j2
@@ -0,0 +1,45 @@
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% for server in chrony_ntp_servers %}
+server {{ server }} iburst
+{% endfor %}
+
+# Ignore stratum in source selection.
+stratumweight 0
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Enable kernel RTC synchronization.
+rtcsync
+
+# In the first three updates, step the system clock instead of slewing
+# if the adjustment is larger than 10 seconds.
+makestep 10 3
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Listen for commands only on localhost.
+bindcmdaddress 127.0.0.1
+bindcmdaddress ::1
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+keyfile /etc/chrony.keys
+
+# Specify the key used as password for chronyc.
+commandkey 1
+
+# Generate command key if missing.
+generatecommandkey
+
+# Disable logging of client accesses.
+noclientlog
+
+# Send a message to syslog if a clock adjustment is larger than 0.5 seconds.
+logchange 0.5
+
+logdir /var/log/chrony
+#log measurements statistics tracking
diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml
new file mode 100644
index 000000000..061a21547
--- /dev/null
+++ b/roles/chrony/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for chrony
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 6e9f3a8bd..e83f72a3d 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -6,7 +6,7 @@
- cockpit-shell
- cockpit-bridge
- "{{ cockpit_plugins }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Enable cockpit-ws
service:
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index e83cfc33c..1e97b047b 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -11,24 +11,8 @@
action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
when: not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
- when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
-
-- name: Wait for etcd image
- command: >
- docker images
- register: docker_images
- until: openshift.etcd.etcd_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
- name: Install etcd container service file
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index d32f5e48c..cf7bc00a3 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,4 +1,8 @@
---
+- name: Install openssl
+ action: "{{ ansible_pkg_mgr }} name=openssl state=present"
+ when: not openshift.common.is_atomic | bool
+
- file:
path: "{{ item }}"
state: directory
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 1c87d562a..32f972f0a 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -1,12 +1,12 @@
---
- fail:
msg: "fluentd master is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
+ when: openshift.common.is_atomic | bool
# TODO: Update fluentd install and configuration when packaging is complete
- name: download and install td-agent
action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index 8d34c0b19..9fd908687 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -1,12 +1,12 @@
---
- fail:
msg: "fluentd node is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
+ when: openshift.common.is_atomic | bool
# TODO: Update fluentd install and configuration when packaging is complete
- name: download and install td-agent
action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_atomic | bool
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
@@ -55,4 +55,3 @@
name: 'td-agent'
state: started
enabled: yes
-
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml
index 7ba5bd485..937d94209 100644
--- a/roles/haproxy/defaults/main.yml
+++ b/roles/haproxy/defaults/main.yml
@@ -1,4 +1,6 @@
---
+haproxy_frontend_port: 80
+
haproxy_frontends:
- name: main
binds:
@@ -18,4 +20,4 @@ os_firewall_allow:
- service: haproxy stats
port: "9000/tcp"
- service: haproxy balance
- port: "8443/tcp"
+ port: "{{ haproxy_frontend_port }}/tcp"
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
index ee60adcab..5b8691b26 100644
--- a/roles/haproxy/handlers/main.yml
+++ b/roles/haproxy/handlers/main.yml
@@ -3,3 +3,4 @@
service:
name: haproxy
state: restarted
+ when: not (haproxy_start_result_changed | default(false) | bool)
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 97f870829..0b8370ce2 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -19,6 +19,5 @@
enabled: yes
register: start_result
-- name: Pause 30 seconds if haproxy was just started
- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ haproxy_start_result_changed: "{{ start_result | changed }}"
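+
+# Together with the handler change above, this implements "skip the restart
+# when the service was only just started": the restart handler becomes a
+# no-op whenever haproxy_start_result_changed is true, replacing the old
+# unconditional 30 second pause.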
diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py
new file mode 100644
index 000000000..b6eab5918
--- /dev/null
+++ b/roles/lib_timedatectl/library/timedatectl.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+'''
+ timedatectl ansible module
+
+ This module supports setting ntp enabled
+'''
+import subprocess
+
+
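+# Minimal usage sketch for this module (illustrative):
+EXAMPLES = '''
+- name: Ensure NTP synchronization is enabled
+  timedatectl:
+    ntp: yes
+'''
+
+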
+def do_timedatectl(options=None):
+ ''' subprocess timedatectl '''
+
+ cmd = ['/usr/bin/timedatectl']
+ if options:
+ cmd += options.split()
+
+ proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE)
+ proc.wait()
+ return proc.stdout.read()
+
+def main():
+ ''' Ansible module for timedatectl
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ #state=dict(default='enabled', type='str'),
+ ntp=dict(default=True, type='bool'),
+ ),
+ #supports_check_mode=True
+ )
+
+ # do something
+ ntp_enabled = False
+
+ results = do_timedatectl()
+
+ for line in results.split('\n'):
+ if 'NTP enabled' in line:
+ if 'yes' in line:
+ ntp_enabled = True
+
+ ########
+ # Enable NTP
+ ########
+ if module.params['ntp']:
+ if ntp_enabled:
+ module.exit_json(changed=False, results="enabled", state="enabled")
+
+ # Enable it
+ # Commands to enable ntp
+ else:
+ results = do_timedatectl('set-ntp yes')
+ module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results)
+
+ #########
+ # Disable NTP
+ #########
+ else:
+ if not ntp_enabled:
+ module.exit_json(changed=False, results="disabled", state="disabled")
+
+ results = do_timedatectl('set-ntp no')
+ module.exit_json(changed=True, results="disabled", state="disabled")
+
+ module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown")
+
+# Pylint is getting in the way of basic Ansible
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py
index c08bef4f7..2f9524556 100644
--- a/roles/lib_zabbix/library/zbx_action.py
+++ b/roles/lib_zabbix/library/zbx_action.py
@@ -81,6 +81,61 @@ def filter_differences(zabbix_filters, user_filters):
return rval
+def opconditions_diff(zab_val, user_val):
+ ''' Report whether there are differences between opconditions on
+ zabbix and opconditions supplied by user '''
+
+ if len(zab_val) != len(user_val):
+ return True
+
+ for z_cond, u_cond in zip(zab_val, user_val):
+ if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
+ ['conditiontype', 'operator', 'value']]):
+ return True
+
+ return False
+
+def opmessage_diff(zab_val, user_val):
+ ''' Report whether there are differences between opmessage on
+ zabbix and opmessage supplied by user '''
+
+ for op_msg_key, op_msg_val in user_val.items():
+ if zab_val[op_msg_key] != str(op_msg_val):
+ return True
+
+ return False
+
+def opmessage_grp_diff(zab_val, user_val):
+ ''' Report whether there are differences between opmessage_grp
+ on zabbix and opmessage_grp supplied by user '''
+
+ zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val])
+ usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val])
+ if usr_grp_ids != zab_grp_ids:
+ return True
+
+ return False
+
+def opmessage_usr_diff(zab_val, user_val):
+ ''' Report whether there are differences between opmessage_usr
+ on zabbix and opmessage_usr supplied by user '''
+
+    zab_usr_ids = set([usr['userid'] for usr in zab_val])
+    usr_ids = set([usr['userid'] for usr in user_val])
+ if usr_ids != zab_usr_ids:
+ return True
+
+ return False
+
+def opcommand_diff(zab_op_cmd, usr_op_cmd):
+ ''' Check whether user-provided opcommand matches what's already
+ stored in Zabbix '''
+
+ for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
+ if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
+ return True
+ return False
+
def host_in_zabbix(zab_hosts, usr_host):
''' Check whether a particular user host is already in the
Zabbix list of hosts '''
@@ -106,23 +161,11 @@ def hostlist_in_zabbix(zab_hosts, usr_hosts):
return True
-def opcommand_diff(zab_op_cmd, usr_op_cmd):
- ''' Check whether user-provided opcommand matches what's already
- stored in Zabbix '''
-
- for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
- if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
- return True
- return False
-
-# This logic is quite complex. We are comparing two lists of dictionaries.
-# The outer for-loops allow us to descend down into both lists at the same time
-# and then walk over the key,val pairs of the incoming user dict's changes
-# or updates. The if-statements are looking at different sub-object types and
-# comparing them. The other suggestion on how to write this is to write a recursive
-# compare function but for the time constraints and for complexity I decided to go
-# this route.
-# pylint: disable=too-many-branches
+# We are comparing two lists of dictionaries (the one stored on zabbix and the
+# one the user is providing). For each type of operation, determine whether there
+# is a difference between what is stored on zabbix and what the user is providing.
+# If there is a difference, we take the user-provided data for what needs to
+# be stored/updated into zabbix.
def operation_differences(zabbix_ops, user_ops):
'''Determine the differences from user and zabbix for operations'''
@@ -132,49 +175,41 @@ def operation_differences(zabbix_ops, user_ops):
rval = {}
for zab, user in zip(zabbix_ops, user_ops):
- for key, val in user.items():
- if key == 'opconditions':
- if len(zab[key]) != len(val):
- rval[key] = val
- break
- for z_cond, u_cond in zip(zab[key], user[key]):
- if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
- ['conditiontype', 'operator', 'value']]):
- rval[key] = val
- break
- elif key == 'opmessage':
- # Verify each passed param matches
- for op_msg_key, op_msg_val in val.items():
- if zab[key][op_msg_key] != str(op_msg_val):
- rval[key] = val
- break
-
- elif key == 'opmessage_grp':
- zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]])
- usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val])
- if usr_grp_ids != zab_grp_ids:
- rval[key] = val
-
- elif key == 'opmessage_usr':
- zab_usr_ids = set([usr['userid'] for usr in zab[key]])
- usr_ids = set([usr['userid'] for usr in val])
- if usr_ids != zab_usr_ids:
- rval[key] = val
-
- elif key == 'opcommand':
- if opcommand_diff(zab[key], val):
- rval[key] = val
- break
+ for oper in user.keys():
+ if oper == 'opconditions' and opconditions_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opmessage' and opmessage_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
+
+ elif oper == 'opcommand' and opcommand_diff(zab[oper], \
+ user[oper]):
+ rval[oper] = user[oper]
# opcommand_grp can be treated just like opcommand_hst
# as opcommand_grp[] is just a list of groups
- elif key == 'opcommand_hst' or key == 'opcommand_grp':
- if not hostlist_in_zabbix(zab[key], val):
- rval[key] = val
- break
+ elif oper == 'opcommand_hst' or oper == 'opcommand_grp':
+ if not hostlist_in_zabbix(zab[oper], user[oper]):
+ rval[oper] = user[oper]
+
+ # if it's any other type of operation than the ones tested above
+ # just do a direct compare
+ elif oper not in ['opconditions', 'opmessage', 'opmessage_grp',
+ 'opmessage_usr', 'opcommand', 'opcommand_hst',
+ 'opcommand_grp'] \
+ and str(zab[oper]) != str(user[oper]):
+ rval[oper] = user[oper]
- elif zab[key] != str(val):
- rval[key] = val
return rval
def get_users(zapi, users):
diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py
index e26c9caf3..560749f07 100644
--- a/roles/lib_zabbix/library/zbx_host.py
+++ b/roles/lib_zabbix/library/zbx_host.py
@@ -63,6 +63,19 @@ def get_template_ids(zapi, template_names):
template_ids.append({'templateid': content['result'][0]['templateid']})
return template_ids
+def interfaces_equal(zbx_interfaces, user_interfaces):
+ '''
+ compare interfaces from zabbix and interfaces from user
+ '''
+
+ for u_int in user_interfaces:
+ for z_int in zbx_interfaces:
+ for u_key, u_val in u_int.items():
+ if str(z_int[u_key]) != str(u_val):
+ return False
+
+ return True
+
def main():
'''
Ansible module for zabbix host
@@ -120,8 +133,9 @@ def main():
'dns': '', # dns for host
'port': '10050', # port for interface? 10050
}]
+ hostgroup_names = list(set(module.params['hostgroup_names']))
params = {'host': hname,
- 'groups': get_group_ids(zapi, module.params['hostgroup_names']),
+ 'groups': get_group_ids(zapi, hostgroup_names),
'templates': get_template_ids(zapi, module.params['template_names']),
'interfaces': ifs,
}
@@ -140,6 +154,11 @@ def main():
if zab_results['parentTemplates'] != value:
differences[key] = value
+
+ elif key == "interfaces":
+ if not interfaces_equal(zab_results[key], value):
+ differences[key] = value
+
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 47749389e..61344357a 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -57,6 +57,7 @@
expression: "{{ item.expression }}"
priority: "{{ item.priority }}"
url: "{{ item.url | default(None, True) }}"
+ status: "{{ item.status | default('', True) }}"
with_items: template.ztriggers
when: template.ztriggers is defined
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 0a0cf1fae..e2c51a903 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,6 +1,7 @@
---
- name: nickhammond.logrotate | Install logrotate
action: "{{ ansible_pkg_mgr }} name=logrotate state=present"
+ when: not openshift.common.is_atomic | bool
- name: nickhammond.logrotate | Setup logrotate.d scripts
template:
diff --git a/roles/nuage_master/README.md b/roles/nuage_master/README.md
new file mode 100644
index 000000000..de101dd19
--- /dev/null
+++ b/roles/nuage_master/README.md
@@ -0,0 +1,8 @@
+Nuage Master
+============
+Setup Nuage Kubernetes Monitor on the Master node
+
+
+Requirements
+------------
+This role assumes it has been deployed on RHEL/Fedora
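+
+A minimal sketch of the inventory settings that drive this role (all values
+are placeholders; see the role's tasks and templates for the full list):
+
+    openshift_use_nuage: true
+    vsd_api_url: https://vsd.example.com:8443
+    vsp_version: v3_2
+    enterprise: myenterprise
+    domain: mydomain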
diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh
new file mode 100644
index 000000000..f6fdb8a8d
--- /dev/null
+++ b/roles/nuage_master/files/serviceaccount.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Parse CLI options
+for i in "$@"; do
+ case $i in
+ --master-cert-dir=*)
+ MASTER_DIR="${i#*=}"
+ CA_CERT=${MASTER_DIR}/ca.crt
+ CA_KEY=${MASTER_DIR}/ca.key
+ CA_SERIAL=${MASTER_DIR}/ca.serial.txt
+ ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig
+ ;;
+ --server=*)
+ SERVER="${i#*=}"
+ ;;
+ --output-cert-dir=*)
+ OUTDIR="${i#*=}"
+ CONFIG_FILE=${OUTDIR}/nuage.kubeconfig
+ ;;
+ esac
+done
+
+# If any are missing, print the usage and exit
+if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then
+ echo "Invalid syntax: $@"
+ echo "Usage:"
+ echo " $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/"
+ echo "--master-cert-dir: Directory where the master's configuration is held"
+ echo "--server: Address of Kubernetes API server (default port is 8443)"
+ echo "--output-cert-dir: Directory to put artifacts in"
+ echo ""
+ echo "All options are required"
+ exit 1
+fi
+
+# Login as admin so that we can create the service account
+oc login -u system:admin --config=$ADMIN_FILE || exit 1
+oc project default --config=$ADMIN_FILE
+
+ACCOUNT_CONFIG='
+{
+ "apiVersion": "v1",
+ "kind": "ServiceAccount",
+ "metadata": {
+ "name": "nuage"
+ }
+}
+'
+
+# Create the account with the included info
+echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f -
+
+# Add the cluster-reader role, which allows this service account read access to
+# everything in the cluster except secrets
+oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE
+
+# Generate certificates and a kubeconfig for the service account
+oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage'
+
+# Verify the finalized kubeconfig
+if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then
+ echo "Service account creation failed!"
+ exit 1
+fi
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
new file mode 100644
index 000000000..635d8a419
--- /dev/null
+++ b/roles/nuage_master/handlers/main.yaml
@@ -0,0 +1,18 @@
+---
+- name: restart nuagekubemon
+ sudo: true
+ service: name=nuagekubemon state=restarted
+
+- name: restart master
+ service: name={{ openshift.common.service_type }}-master state=restarted
+ when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ ignore_errors: yes
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
new file mode 100644
index 000000000..a7baadc76
--- /dev/null
+++ b/roles/nuage_master/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: Create directory /usr/share/nuagekubemon
+ sudo: true
+ file: path=/usr/share/nuagekubemon state=directory
+
+- name: Create the log directory
+ sudo: true
+ file: path={{ nuagekubemon_log_dir }} state=directory
+
+- name: Install Nuage Kubemon
+ sudo: true
+ yum: name={{ nuage_kubemon_rpm }} state=present
+
+- name: Run the service account creation script
+ sudo: true
+ script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }}
+
+- name: Download the certs and keys
+ sudo: true
+ fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
+ with_items:
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
+
+- name: Create nuagekubemon.yaml
+ sudo: true
+ template: src=nuagekubemon.j2 dest=/usr/share/nuagekubemon/nuagekubemon.yaml owner=root mode=0644
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
+ - restart nuagekubemon
diff --git a/roles/nuage_master/templates/nuagekubemon.j2 b/roles/nuage_master/templates/nuagekubemon.j2
new file mode 100644
index 000000000..fb586bcee
--- /dev/null
+++ b/roles/nuage_master/templates/nuagekubemon.j2
@@ -0,0 +1,19 @@
+# .kubeconfig that includes the nuage service account
+kubeConfig: {{ kube_config }}
+# name of the nuage service account, or another account with 'cluster-reader'
+# permissions
+# Openshift master config file
+openshiftMasterConfig: {{ master_config_yaml }}
+# URL of the VSD Architect
+vsdApiUrl: {{ vsd_api_url }}
+# API version to query against. Usually "v3_2"
+vspVersion: {{ vsp_version }}
+# File containing a VSP license to install. Only necessary if no license has
+# been installed on the VSD Architect before, only valid for standalone vsd install
+# licenseFile: "/path/to/base_vsp_license.txt"
+# Name of the enterprise in which pods will reside
+enterpriseName: {{ enterprise }}
+# Name of the domain in which pods will reside
+domainName: {{ domain }}
+# Location where logs should be saved
+log_dir: {{ nuagekubemon_log_dir }}
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
new file mode 100644
index 000000000..db901fea6
--- /dev/null
+++ b/roles/nuage_master/vars/main.yaml
@@ -0,0 +1,7 @@
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
+cert_output_dir: /usr/share/nuagekubemon
+kube_config: /usr/share/nuagekubemon/nuage.kubeconfig
+kubemon_yaml: /usr/share/nuagekubemon/nuagekubemon.yaml
+master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
diff --git a/roles/nuage_node/README.md b/roles/nuage_node/README.md
new file mode 100644
index 000000000..02a3cbc77
--- /dev/null
+++ b/roles/nuage_node/README.md
@@ -0,0 +1,9 @@
+Nuage Node
+==========
+
+Setup Nuage VRS (Virtual Routing Switching) on the Openshift Node
+
+Requirements
+------------
+
+This role assumes it has been deployed on RHEL/Fedora
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
new file mode 100644
index 000000000..25482a845
--- /dev/null
+++ b/roles/nuage_node/handlers/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: restart vrs
+ sudo: true
+ service: name=openvswitch state=restarted
+
+- name: restart node
+ sudo: true
+ service: name={{ openshift.common.service_type }}-node state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
new file mode 100644
index 000000000..e0117bf71
--- /dev/null
+++ b/roles/nuage_node/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- name: Install Nuage VRS
+ sudo: true
+ yum: name={{ vrs_rpm }} state=present
+
+- name: Set the uplink interface
+ sudo: true
+ lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
+
+- name: Set the Active Controller
+ sudo: true
+ lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
+
+- name: Set the Standby Controller
+ sudo: true
+ lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
+ when: vsc_standby_ip is defined
+
+- name: Install plugin rpm
+ sudo: true
+ yum: name={{ plugin_rpm }} state=present
+
+- name: Copy the certificates and keys
+ sudo: true
+ copy: src="/tmp/{{ item }}" dest="{{ vsp_k8s_dir }}/{{ item }}"
+ with_items:
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
+
+- name: Set the vsp-k8s.yaml
+ sudo: true
+ template: src=vsp-k8s.j2 dest={{ vsp_k8s_yaml }} owner=root mode=0644
+ notify:
+ - restart vrs
+ - restart node
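+
+# Note: vrs_rpm, plugin_rpm, uplink_interface and vsc_active_ip (and the
+# optional vsc_standby_ip) are expected to come from the inventory; a sketch
+# with placeholder values:
+#   vrs_rpm: nuage-openvswitch
+#   plugin_rpm: nuage-openshift-node
+#   uplink_interface: eth0
+#   vsc_active_ip: 10.0.0.10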
diff --git a/roles/nuage_node/templates/vsp-k8s.j2 b/roles/nuage_node/templates/vsp-k8s.j2
new file mode 100644
index 000000000..98d6c3a9c
--- /dev/null
+++ b/roles/nuage_node/templates/vsp-k8s.j2
@@ -0,0 +1,14 @@
+clientCert: {{ client_cert }}
+# The key to the certificate in clientCert above
+clientKey: {{ client_key }}
+# The certificate authority's certificate for the local kubelet. Usually the
+# same as the CA cert used to create the client Cert/Key pair.
+CACert: {{ ca_cert }}
+# Name of the enterprise in which pods will reside
+enterpriseName: {{ enterprise }}
+# Name of the domain in which pods will reside
+domainName: {{ domain }}
+# IP address and port number of master API server
+masterApiServer: {{ api_server }}
+# Bridge name for the docker bridge
+dockerBridgeName: {{ docker_bridge }}
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
new file mode 100644
index 000000000..a6b7cf997
--- /dev/null
+++ b/roles/nuage_node/vars/main.yaml
@@ -0,0 +1,9 @@
+---
+vrs_config: /etc/default/openvswitch
+vsp_k8s_dir: /usr/share/vsp-k8s
+vsp_k8s_yaml: "{{ vsp_k8s_dir }}/vsp-k8s.yaml"
+client_cert: "{{ vsp_k8s_dir }}/nuage.crt"
+client_key: "{{ vsp_k8s_dir }}/nuage.key"
+ca_cert: "{{ vsp_k8s_dir }}/ca.crt"
+api_server: "{{ openshift_node_master_api_url }}"
+docker_bridge: "docker0"
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 8d7686ffd..a6b6b1925 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -3,32 +3,17 @@
role: common
local_facts:
deployment_type: "{{ openshift_deployment_type }}"
+ cli_image: "{{ osm_image | default(None) }}"
- name: Install clients
- yum: pkg={{ openshift.common.service_type }}-clients state=installed
+ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-clients state=present"
when: not openshift.common.is_containerized | bool
-- name: List Docker images
- command: >
- docker images
- register: docker_images
-
- name: Pull CLI Image
command: >
docker pull {{ openshift.common.cli_image }}
- when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
-
-- name: Wait for CLI image
- command: >
- docker images
- register: docker_images
- until: openshift.common.cli_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
-
- name: Create /usr/local/bin/openshift cli wrapper
template:
src: openshift.j2
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 0ee873a2b..ff8c3b50f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -4,6 +4,14 @@
when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
- fail:
+  msg: Nuage SDN cannot be used with OpenShift SDN
+ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
+  msg: Nuage SDN cannot be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
msg: openshift_hostname must be 64 characters or less
when: openshift_hostname is defined and openshift_hostname | length > 64
@@ -23,7 +31,9 @@
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
+ use_nuage: "{{ openshift_use_nuage | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
+ data_dir: "{{ openshift_data_dir | default(None) }}"
- name: Install the base package for versioning
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present"
@@ -38,5 +48,6 @@
set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
- name: Set hostname
- hostname: name={{ openshift.common.hostname }}
+ command: >
+ hostnamectl set-hostname {{ openshift.common.hostname }}
when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 50816d319..b163f8aae 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,3 +5,4 @@
# chains with the public zone (or the zone associated with the correct
# interfaces)
os_firewall_use_firewalld: False
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2a3d4acbd..40e54d706 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -188,9 +188,6 @@ def normalize_gce_facts(metadata, facts):
_, _, zone = metadata['instance']['zone'].rpartition('/')
facts['zone'] = zone
- # Default to no sdn for GCE deployments
- facts['use_openshift_sdn'] = False
-
# GCE currently only supports a single interface
facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
@@ -341,6 +338,23 @@ def set_flannel_facts_if_unset(facts):
facts['common']['use_flannel'] = use_flannel
return facts
+def set_nuage_facts_if_unset(facts):
+    """ Set nuage facts if not already present in facts dict
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the nuage
+                  facts if they were not already present
+    """
+ if 'common' in facts:
+ if 'use_nuage' not in facts['common']:
+ use_nuage = False
+ facts['common']['use_nuage'] = use_nuage
+ return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -461,52 +475,68 @@ def set_url_facts_if_unset(facts):
were not already present
"""
if 'master' in facts:
- api_use_ssl = facts['master']['api_use_ssl']
- api_port = facts['master']['api_port']
- console_use_ssl = facts['master']['console_use_ssl']
- console_port = facts['master']['console_port']
- console_path = facts['master']['console_path']
- etcd_use_ssl = facts['master']['etcd_use_ssl']
- etcd_hosts = facts['master']['etcd_hosts']
- etcd_port = facts['master']['etcd_port']
hostname = facts['common']['hostname']
- public_hostname = facts['common']['public_hostname']
cluster_hostname = facts['master'].get('cluster_hostname')
cluster_public_hostname = facts['master'].get('cluster_public_hostname')
+ public_hostname = facts['common']['public_hostname']
+ api_hostname = cluster_hostname if cluster_hostname else hostname
+ api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
+ console_path = facts['master']['console_path']
+ etcd_hosts = facts['master']['etcd_hosts']
+
+ use_ssl = dict(
+ api=facts['master']['api_use_ssl'],
+ public_api=facts['master']['api_use_ssl'],
+ loopback_api=facts['master']['api_use_ssl'],
+ console=facts['master']['console_use_ssl'],
+ public_console=facts['master']['console_use_ssl'],
+ etcd=facts['master']['etcd_use_ssl']
+ )
+
+ ports = dict(
+ api=facts['master']['api_port'],
+ public_api=facts['master']['api_port'],
+ loopback_api=facts['master']['api_port'],
+ console=facts['master']['console_port'],
+ public_console=facts['master']['console_port'],
+ etcd=facts['master']['etcd_port'],
+ )
+
+ etcd_urls = []
+ if etcd_hosts != '':
+ facts['master']['etcd_port'] = ports['etcd']
+ facts['master']['embedded_etcd'] = False
+ for host in etcd_hosts:
+ etcd_urls.append(format_url(use_ssl['etcd'], host,
+ ports['etcd']))
+ else:
+ etcd_urls = [format_url(use_ssl['etcd'], hostname,
+ ports['etcd'])]
+
+ facts['master'].setdefault('etcd_urls', etcd_urls)
+
+ prefix_hosts = [('api', api_hostname),
+ ('public_api', api_public_hostname),
+ ('loopback_api', hostname)]
+
+ for prefix, host in prefix_hosts:
+ facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
+ host,
+ ports[prefix]))
+
+
+ r_lhn = "{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
+ facts['master'].setdefault('loopback_cluster_name', r_lhn)
+ facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
+ facts['master'].setdefault('loopback_user', "system:openshift-master/{0}".format(r_lhn))
+
+ prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
+ for prefix, host in prefix_hosts:
+ facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
+ host,
+ ports[prefix],
+ console_path))
- if 'etcd_urls' not in facts['master']:
- etcd_urls = []
- if etcd_hosts != '':
- facts['master']['etcd_port'] = etcd_port
- facts['master']['embedded_etcd'] = False
- for host in etcd_hosts:
- etcd_urls.append(format_url(etcd_use_ssl, host,
- etcd_port))
- else:
- etcd_urls = [format_url(etcd_use_ssl, hostname,
- etcd_port)]
- facts['master']['etcd_urls'] = etcd_urls
- if 'api_url' not in facts['master']:
- api_hostname = cluster_hostname if cluster_hostname else hostname
- facts['master']['api_url'] = format_url(api_use_ssl, api_hostname,
- api_port)
- if 'public_api_url' not in facts['master']:
- api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
- facts['master']['public_api_url'] = format_url(api_use_ssl,
- api_public_hostname,
- api_port)
- if 'console_url' not in facts['master']:
- console_hostname = cluster_hostname if cluster_hostname else hostname
- facts['master']['console_url'] = format_url(console_use_ssl,
- console_hostname,
- console_port,
- console_path)
- if 'public_console_url' not in facts['master']:
- console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
- facts['master']['public_console_url'] = format_url(console_use_ssl,
- console_public_hostname,
- console_port,
- console_path)
return facts
def set_aggregate_facts(facts):
@@ -884,10 +914,6 @@ def apply_provider_facts(facts, provider_facts):
if not provider_facts:
return facts
- use_openshift_sdn = provider_facts.get('use_openshift_sdn')
- if isinstance(use_openshift_sdn, bool):
- facts['common']['use_openshift_sdn'] = use_openshift_sdn
-
common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
for h_var, ip_var in common_vars:
ip_value = provider_facts['network'].get(ip_var)
@@ -1038,6 +1064,10 @@ def set_container_facts_if_unset(facts):
if 'ovs_image' not in facts['node']:
facts['node']['ovs_image'] = ovs_image
+ if facts['common']['is_containerized']:
+ facts['common']['admin_binary'] = '/usr/local/bin/oadm'
+ facts['common']['client_binary'] = '/usr/local/bin/oc'
+
return facts
@@ -1078,7 +1108,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd', 'nfs']
+ known_roles = ['common', 'master', 'node', 'etcd', 'nfs']
def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
self.changed = False
@@ -1115,6 +1145,7 @@ class OpenShiftFacts(object):
facts = set_project_cfg_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
facts = set_flannel_facts_if_unset(facts)
+ facts = set_nuage_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
@@ -1156,7 +1187,7 @@ class OpenShiftFacts(object):
defaults['common'] = common
if 'master' in roles:
- master = dict(api_use_ssl=True, api_port='8443',
+ master = dict(api_use_ssl=True, api_port='8443', controllers_port='8444',
console_use_ssl=True, console_path='/console',
console_port='8443', etcd_use_ssl=True, etcd_hosts='',
etcd_port='4001', portal_net='172.30.0.0/16',
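
The refactored set_url_facts_if_unset above collapses five near-identical if-blocks into shared use_ssl/ports dicts plus dict.setdefault, so user-supplied URLs always win over derived ones. A minimal Python sketch of that pattern; format_url is re-implemented here for illustration and the hostname is a placeholder:

def format_url(use_ssl, hostname, port, path=''):
    # Mirrors the module's helper: scheme://host:port/path.
    scheme = 'https' if use_ssl else 'http'
    return '{0}://{1}:{2}{3}'.format(scheme, hostname, port, path)

master = {'api_use_ssl': True, 'api_port': '8443'}
hostname = 'master.example.com'

# setdefault only fills in what the user has not already supplied, which
# keeps the fact module idempotent across repeated runs.
master.setdefault('api_url',
                  format_url(master['api_use_ssl'], hostname,
                             master['api_port']))

# Loopback kubeconfig names flatten the host by replacing dots with dashes.
r_lhn = '{0}:{1}'.format(hostname, master['api_port']).replace('.', '-')
assert master['api_url'] == 'https://master.example.com:8443'
assert r_lhn == 'master-example-com:8443'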
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 87fa99a3b..0dbac1b54 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,15 +1,14 @@
---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
- assert:
- that:
- - ansible_version | version_compare('1.8.0', 'ge')
- - ansible_version | version_compare('1.9.0', 'ne')
- - ansible_version | version_compare('1.9.0.1', 'ne')
-
+- name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version }} found"
+ when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+
- name: Detecting Operating System
shell: ls /run/ostree-booted
ignore_errors: yes
failed_when: false
+ changed_when: false
register: ostree_output
# Locally setup containerized facts for now
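
The new version check replaces the assert with an explicit fail guarded by version_compare. A rough Python equivalent of the accepted range, for illustration only (the task itself relies on Ansible's version_compare filter):

from distutils.version import LooseVersion

def ansible_version_supported(version):
    # Accept 1.9.4 <= version < 2.0, matching the task's when: clause.
    v = LooseVersion(version)
    return LooseVersion('1.9.4') <= v < LooseVersion('2.0')

assert ansible_version_supported('1.9.4')
assert not ansible_version_supported('1.9.0.1')
assert not ansible_version_supported('2.0.0')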
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 9766d01ae..1f74d851a 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -6,7 +6,9 @@ os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
- service: api server https
- port: 8443/tcp
+ port: "{{ openshift.master.api_port }}/tcp"
+- service: api controllers https
+ port: "{{ openshift.master.controllers_port }}/tcp"
- service: dns tcp
port: 53/tcp
- service: dns udp
@@ -24,7 +26,5 @@ os_firewall_allow:
os_firewall_deny:
- service: api server http
port: 8080/tcp
-- service: former web console port
- port: 8444/tcp
- service: former etcd peer port
port: 7001/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index e1b95eda4..6b9992eea 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -2,11 +2,24 @@
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
- name: restart master api
service: name={{ openshift.common.service_type }}-master-api state=restarted
when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
- name: restart master controllers
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
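
The Verify API Server handler polls with curl because the uri module would pull in python-httplib2 and wait_for only checks the port, not health. A standalone sketch of the same retry loop, assuming a plain HTTPS endpoint; certificate verification is disabled to mirror curl's -k flag:

import ssl
import time
try:
    from urllib.request import Request, urlopen   # Python 3
except ImportError:
    from urllib2 import Request, urlopen          # Python 2

def wait_for_api(api_url, retries=120, delay=1):
    # Mirrors the handler's until/retries/delay loop: HEAD the API and
    # keep trying until it answers 200, up to retries * delay seconds.
    ctx = ssl._create_unverified_context()  # curl -k equivalent
    for _ in range(retries):
        try:
            req = Request(api_url)
            req.get_method = lambda: 'HEAD'
            if urlopen(req, context=ctx).getcode() == 200:
                return True
        except Exception:
            pass
        time.sleep(delay)
    return False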
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 397122631..57b50bee4 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -42,7 +42,12 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
logging_public_url: "{{ openshift_master_logging_public_url | default(None) }}"
metrics_public_url: "{{ openshift_master_metrics_public_url | default(None) }}"
- etcd_hosts: "{{ openshift_master_etcd_hosts | default(None)}}"
+ logout_url: "{{ openshift_master_logout_url | default(None) }}"
+ extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}"
+ extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}"
+ extensions: "{{ openshift_master_extensions | default(None) }}"
+ oauth_template: "{{ openshift_master_oauth_template | default(None) }}"
+ etcd_hosts: "{{ openshift_master_etcd_hosts | default(None) }}"
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
@@ -51,6 +56,7 @@
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
dns_port: "{{ openshift_master_dns_port | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
+ pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
@@ -86,36 +92,20 @@
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=present"
when: not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull master image
command: >
docker pull {{ openshift.master.master_image }}
- when: openshift.common.is_containerized | bool and openshift.master.master_image not in docker_images.stdout
-
-- name: Wait for master image
- command: >
- docker images
- register: docker_images
- until: openshift.master.master_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
- name: Install Master docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
- src: master.docker.service.j2
+ src: docker/master.docker.service.j2
register: install_result
when: openshift.common.is_containerized | bool and not openshift_master_ha | bool
-
+
- name: Create openshift.common.data_dir
- file:
+ file:
path: "{{ openshift.common.data_dir }}"
state: directory
mode: 0755
@@ -190,31 +180,42 @@
when: openshift.common.is_containerized | bool
# workaround for missing systemd unit files for controllers/api
-- name: Create the api service file
+- name: Create the systemd unit files
template:
- src: atomic-openshift-master-api{{ ha_suffix }}.service.j2
- dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-api.service"
+ src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
+ dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- name: Create the controllers service file
- template:
- src: atomic-openshift-master-controllers{{ ha_suffix }}.service.j2
- dest: "{{ ha_svcdir }}/{{ openshift.common.service_type }}-master-controllers.service"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- name: Create the api env file
+ with_items:
+ - api
+ - controllers
+ register: create_unit_files
+
+- command: systemctl daemon-reload
+ when: create_unit_files | changed
+# end workaround for missing systemd unit files
+
+- name: Create the master api service env file
template:
- src: atomic-openshift-master-api.j2
+ src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
- force: no
when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- name: Create the controllers env file
+ notify:
+ - restart master api
+
+- name: Create the master controllers service env file
template:
- src: atomic-openshift-master-controllers.j2
+ src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
- force: no
when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-- command: systemctl daemon-reload
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
-# end workaround for missing systemd unit files
+ notify:
+ - restart master controllers
+
+- name: Create the master service env file
+ template:
+ src: "atomic-openshift-master.j2"
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
+ notify:
+ - restart master
- name: Create session secrets file
template:
@@ -239,52 +240,42 @@
- restart master api
- restart master controllers
-- name: Configure master settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: yes
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift_master_config_file }}"
- notify:
- - restart master
-
-- name: Configure master api settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift_master_config_file }}"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
- notify:
- - restart master api
-
-- name: Configure master controller settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift_master_config_file }}"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
- notify:
- - restart master controllers
+- name: Test local loopback context
+ command: >
+ {{ openshift.common.client_binary }} config view
+ --config={{ openshift_master_loopback_config }}
+ changed_when: false
+ register: loopback_config
+
+- command: >
+ {{ openshift.common.client_binary }} config set-cluster
+ --certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ --embed-certs=true --server={{ openshift.master.loopback_api_url }}
+ {{ openshift.master.loopback_cluster_name }}
+ --config={{ openshift_master_loopback_config }}
+ when: loopback_context_string not in loopback_config.stdout
+ register: set_loopback_cluster
+
+- command: >
+ {{ openshift.common.client_binary }} config set-context
+ --cluster={{ openshift.master.loopback_cluster_name }}
+ --namespace=default --user={{ openshift.master.loopback_user }}
+ {{ openshift.master.loopback_context_name }}
+ --config={{ openshift_master_loopback_config }}
+ when: set_loopback_cluster | changed
+ register: set_loopback_context
+
+- command: >
+ {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+ --config={{ openshift_master_loopback_config }}
+ when: set_loopback_context | changed
+ register: set_current_context
- name: Start and enable master
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
register: start_result
+ notify: Verify API Server
- name: Stop and disable non HA master when running HA
service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
@@ -303,6 +294,20 @@
master_api_service_status_changed: "{{ start_result | changed }}"
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+# A separate wait is required here for native HA since notifies will
+# be resolved after all tasks in the role.
+- name: Wait for API to become available
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift.master.api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
+
- name: Start and enable master controller
service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
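
The loopback-kubeconfig tasks above stay idempotent by only rewriting the cluster and context when the expected current-context line is missing from the oc config view output. A sketch of that flow; the function name and argument list are illustrative, while the oc subcommands and flags are the ones the tasks invoke:

import subprocess

def ensure_loopback_context(oc, kubeconfig, cluster, context, user,
                            api_url, ca_path):
    # Equivalent of the "Test local loopback context" task plus its guard.
    view = subprocess.check_output(
        [oc, 'config', 'view', '--config=' + kubeconfig])
    if ('current-context: ' + context) in view.decode():
        return  # nothing to do; the tasks above skip via their when: clauses
    subprocess.check_call(
        [oc, 'config', 'set-cluster',
         '--certificate-authority=' + ca_path, '--embed-certs=true',
         '--server=' + api_url, cluster, '--config=' + kubeconfig])
    subprocess.check_call(
        [oc, 'config', 'set-context', '--cluster=' + cluster,
         '--namespace=default', '--user=' + user, context,
         '--config=' + kubeconfig])
    subprocess.check_call(
        [oc, 'config', 'use-context', context, '--config=' + kubeconfig])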
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 205934248..81bae5470 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,5 +1,5 @@
-OPTIONS=
-CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+OPTIONS=--loglevel={{ openshift.master.debug_level }}
+CONFIG_FILE={{ openshift_master_config_file }}
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2
new file mode 120000
index 000000000..4bb7095ee
--- /dev/null
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.j2
@@ -0,0 +1 @@
+../native-cluster/atomic-openshift-master-api.j2 \ No newline at end of file
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 936c39edf..a935b82f6 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-api.docker.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -23,4 +23,4 @@ Restart=always
[Install]
WantedBy=multi-user.target
-WantedBy={{ openshift.common.service_type }}-node.service \ No newline at end of file
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2
new file mode 120000
index 000000000..8714ebbae
--- /dev/null
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.j2
@@ -0,0 +1 @@
+../native-cluster/atomic-openshift-master-controllers.j2 \ No newline at end of file
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 6ba7d6e2a..6ba7d6e2a 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-controllers.docker.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
diff --git a/roles/openshift_master/templates/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2
index 23781a313..23781a313 100644
--- a/roles/openshift_master/templates/master.docker.service.j2
+++ b/roles/openshift_master/templates/docker/master.docker.service.j2
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 647476b7f..1eeab46fe 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -5,7 +5,7 @@ apiLevels:
- v1
apiVersion: v1
assetConfig:
- logoutURL: ""
+ logoutURL: "{{ openshift.master.logout_url | default('') }}"
masterPublicURL: {{ openshift.master.public_api_url }}
publicURL: {{ openshift.master.public_console_url }}/
{% if 'logging_public_url' in openshift.master %}
@@ -14,6 +14,15 @@ assetConfig:
{% if 'metrics_public_url' in openshift.master %}
metricsPublicURL: {{ openshift.master.metrics_public_url }}
{% endif %}
+{% if 'extension_scripts' in openshift.master %}
+ extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }}
+{% endif %}
+{% if 'extension_stylesheets' in openshift.master %}
+ extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }}
+{% endif %}
+{% if 'extensions' in openshift.master %}
+ extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }}
+{% endif %}
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
bindNetwork: tcp4
@@ -87,11 +96,11 @@ kubernetesMasterConfig:
- v1beta3
- v1
{% endif %}
- apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_json }}
- controllerArguments: {{ openshift.master.controller_args | default(None) | to_json }}
+ apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+ controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
- podEvictionTimeout: ""
+ podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
certFile: master.proxy-client.crt
keyFile: master.proxy-client.key
@@ -108,12 +117,16 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
oauthConfig:
+{% if 'oauth_template' in openshift.master %}
+ templates:
+ login: {{ openshift.master.oauth_template }}
+{% endif %}
assetPublicURL: {{ openshift.master.public_console_url }}/
grantConfig:
method: {{ openshift.master.oauth_grant_method }}
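
Several template values switch from to_json to a to_padded_yaml filter so nested arguments render as indented YAML under their parent key instead of a one-line JSON blob. A minimal stand-in with assumed (level, indent) semantics; the real filter ships with the repository's filter plugins:

import yaml

def to_padded_yaml(data, level=0, indent=2):
    # Empty values render as an empty string so the YAML key stays bare.
    if data in (None, '', {}):
        return ''
    dumped = yaml.safe_dump(data, default_flow_style=False)
    pad = ' ' * (level * indent)
    return '\n' + '\n'.join(pad + line for line in dumped.splitlines())

print('kubeletArguments:' + to_padded_yaml({'max-pods': ['40']}, level=1))
# kubeletArguments:
#   max-pods:
#   - '40'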
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
new file mode 100644
index 000000000..48bfa5f04
--- /dev/null
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -0,0 +1,9 @@
+OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+CONFIG_FILE={{ openshift_master_config_file }}
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index ba19fb348..ba19fb348 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 205934248..cdc56eece 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,5 +1,5 @@
-OPTIONS=
-CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+CONFIG_FILE={{ openshift_master_config_file }}
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index e6e97b24f..e6e97b24f 100644
--- a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 534465451..fe88c3c16 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,11 +1,16 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
openshift_version: "{{ openshift_pkg_version | default('') }}"
+ha_svc_template_path: "{{ 'docker-cluster' if openshift.common.is_containerized | bool else 'native-cluster' }}"
+ha_svc_svc_dir: "{{ '/etc/systemd/system' if openshift.common.is_containerized | bool else '/usr/lib/systemd/system' }}"
+
openshift_master_valid_grant_methods:
- auto
- prompt
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 5b4c92f2b..6d9be81c0 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -13,16 +13,10 @@
path: "{{ openshift_master_config_dir }}"
state: directory
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
-- name: Pull required docker image
+- name: Pull master docker image
command: >
docker pull {{ openshift.common.cli_image }}
- when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+ when: openshift.common.is_containerized | bool
- name: Create the master certificates if they do not already exist
command: >
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
index 7ab9afb51..1b94598dd 100644
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ b/roles/openshift_master_cluster/tasks/configure.yml
@@ -34,11 +34,10 @@
- name: Disable stonith
command: pcs property set stonith-enabled=false
-# TODO: handle case where api port is not 8443
- name: Wait for the clustered master service to be available
wait_for:
host: "{{ openshift_master_cluster_vip }}"
- port: 8443
+ port: "{{ openshift.master.api_port }}"
state: started
timeout: 180
delay: 90
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 33852d7f8..9035248f9 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,41 +44,14 @@
action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
-- name: Get docker images
- command: docker images
- changed_when: false
- when: openshift.common.is_containerized | bool
- register: docker_images
-
- name: Pull node image
command: >
docker pull {{ openshift.node.node_image }}
- when: openshift.common.is_containerized | bool and openshift.node.node_image not in docker_images.stdout
-
-- name: Wait for node image
- command: >
- docker images
- register: docker_images
- until: openshift.node.node_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool
-
+
- name: Pull OpenVSwitch image
command: >
docker pull {{ openshift.node.ovs_image }}
- when: openshift.common.is_containerized | bool and openshift.node.ovs_image not in docker_images.stdout
- and openshift.common.use_openshift_sdn | bool
-
-- name: Wait for OpenVSwitch image
- command: >
- docker images
- register: docker_images
- until: openshift.node.ovs_image in docker_images.stdout
- retries: 30
- delay: 10
- changed_when: false
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
- name: Install Node docker service file
@@ -130,6 +103,21 @@
- name: Additional storage plugin configuration
include: storage_plugins/main.yml
+# Necessary because when you're on a node that's also a master, the master will
+# be restarted after the node restarts docker, and it can take up to 60 seconds
+# for systemd to start the master again
+- name: Wait for master API to become available before proceeding
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl -k --head --silent {{ openshift_node_master_api_url }}
+ register: api_available_output
+ until: api_available_output.stdout.find("200 OK") != -1
+ retries: 120
+ delay: 1
+ changed_when: false
+ when: openshift.common.is_containerized | bool
+
- name: Start and enable node
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: start_result
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 1edf21d9b..14a613786 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,4 +1,8 @@
---
+- name: Install NFS storage plugin dependencies
+ action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+ when: not openshift.common.is_atomic | bool
+
- name: Set seboolean to allow nfs storage plugin access from containers
seboolean:
name: virt_use_nfs
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 23bd81f91..44065f4bd 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -11,9 +11,7 @@ imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
kind: NodeConfig
-{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}
-kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
-{% endif %}
+kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
@@ -22,7 +20,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index bcf1d9a34..e3176e611 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -1,2 +1,3 @@
---
+os_firewall_enabled: True
os_firewall_use_firewalld: True
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index ad89ef97c..076e5e311 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -1,6 +1,6 @@
---
- include: firewall/firewalld.yml
- when: os_firewall_use_firewalld
+ when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool
- include: firewall/iptables.yml
- when: not os_firewall_use_firewalld
+ when: os_firewall_enabled | bool and not os_firewall_use_firewalld | bool
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index a8b65dd56..1c8d88854 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: "Zabbix config is not yet supported on atomic hosts"
- when: openshift.common.is_containerized | bool
-
- name: Main List all templates
zbx_template:
zbx_server: "{{ ozb_server }}"
@@ -45,6 +41,10 @@
tags:
- zagg_server
+- include_vars: template_config_loop.yml
+ tags:
+ - config_loop
+
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
vars:
@@ -154,3 +154,13 @@
password: "{{ ozb_password }}"
tags:
- zagg_server
+
+- name: Include Template Config Loop
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_config_loop }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+ tags:
+ - config_loop
diff --git a/roles/os_zabbix/vars/template_config_loop.yml b/roles/os_zabbix/vars/template_config_loop.yml
new file mode 100644
index 000000000..823da1868
--- /dev/null
+++ b/roles/os_zabbix/vars/template_config_loop.yml
@@ -0,0 +1,14 @@
+---
+g_template_config_loop:
+ name: Template Config Loop
+ zitems:
+ - key: config_loop.run.exit_code
+ applications:
+ - Config Loop
+ value_type: int
+
+ ztriggers:
+ - name: 'config_loop.run.exit_code not zero on {HOST.NAME}'
+ expression: '{Template Config Loop:config_loop.run.exit_code.min(#2)}>0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_config_loop.asciidoc'
+ priority: average
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
index a05e552e3..dd13e76f7 100644
--- a/roles/os_zabbix/vars/template_docker.yml
+++ b/roles/os_zabbix/vars/template_docker.yml
@@ -72,10 +72,12 @@ g_template_docker:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc'
priority: high
+ # Disabled for now; re-enable for OpenShift 3.1.1 (https://bugzilla.redhat.com/show_bug.cgi?id=1292971#c6)
- name: 'docker.container.dns.resolution failed on {HOST.NAME}'
expression: '{Template Docker:docker.container.dns.resolution.min(#3)}>0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc'
priority: average
+ status: disabled
- name: 'docker.container.existing.dns.resolution.failed on {HOST.NAME}'
expression: '{Template Docker:docker.container.existing.dns.resolution.failed.min(#3)}>0'
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index a0ba8d104..12ea36c8b 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -98,6 +98,18 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.skydns.port.open
+ description: Whether the SkyDNS port is open and listening
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.skydns.query
+ description: Whether SkyDNS can be queried
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.etcd.create.success
description: Show number of successful create actions
type: int
@@ -305,6 +317,20 @@ g_template_openshift_master:
- 'Openshift Master process not running on {HOST.NAME}'
priority: high
+ - name: 'SkyDNS port not listening on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'SkyDNS query failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.skydns.query.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master API health check is failing on {HOST.NAME}'
+ priority: high
+
zgraphs:
- name: Openshift Master API Server Latency Pods LIST Quantiles
width: 900
diff --git a/roles/oso_host_monitoring/handlers/main.yml b/roles/oso_host_monitoring/handlers/main.yml
index 7863ad15b..3a5d8024c 100644
--- a/roles/oso_host_monitoring/handlers/main.yml
+++ b/roles/oso_host_monitoring/handlers/main.yml
@@ -4,9 +4,3 @@
name: "{{ osohm_host_monitoring }}"
state: restarted
enabled: yes
-
-- name: "Restart the {{ osohm_zagg_client }} service"
- service:
- name: "{{ osohm_zagg_client }}"
- state: restarted
- enabled: yes
diff --git a/roles/oso_host_monitoring/tasks/main.yml b/roles/oso_host_monitoring/tasks/main.yml
index 6ddfa3dcb..a0a453416 100644
--- a/roles/oso_host_monitoring/tasks/main.yml
+++ b/roles/oso_host_monitoring/tasks/main.yml
@@ -5,7 +5,6 @@
with_items:
- osohm_zagg_web_url
- osohm_host_monitoring
- - osohm_zagg_client
- osohm_docker_registry_url
- osohm_default_zagg_server_user
- osohm_default_zagg_server_password
@@ -37,29 +36,12 @@
- "Restart the {{ osohm_host_monitoring }} service"
register: systemd_host_monitoring
-- name: "Copy {{ osohm_zagg_client }} systemd file"
- template:
- src: "{{ osohm_zagg_client }}.service.j2"
- dest: "/etc/systemd/system/{{ osohm_zagg_client }}.service"
- owner: root
- group: root
- mode: 0644
- notify:
- - "Restart the {{ osohm_zagg_client }} service"
- register: zagg_systemd
-
- name: reload systemd
command: /usr/bin/systemctl --system daemon-reload
- when: systemd_host_monitoring | changed or zagg_systemd | changed
+ when: systemd_host_monitoring | changed
- name: "Start the {{ osohm_host_monitoring }} service"
service:
name: "{{ osohm_host_monitoring }}"
state: started
enabled: yes
-
-- name: "Start the {{ osohm_zagg_client }} service"
- service:
- name: "{{ osohm_zagg_client }}"
- state: started
- enabled: yes
diff --git a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
deleted file mode 100644
index d18ad90fe..000000000
--- a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-# This is a systemd file to run this docker container under systemd.
-# To make this work:
-# * pull the image (probably from ops docker registry)
-# * place this file in /etc/systemd/system without the .systemd extension
-# * run the commands:
-# systemctl daemon-reload
-# systemctl enable pcp-docker
-# systemctl start pcp-docker
-#
-#
-[Unit]
-Description=PCP Collector Contatainer
-Requires=docker.service
-After=docker.service
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Environment=HOME=/etc/docker/ops
-#Slice=container-small.slice
-
-# systemd syntax '=-' ignore errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
-
-
-ExecStart=/usr/bin/docker run --rm --name="{{ osohm_host_monitoring }}" \
- --privileged --net=host --pid=host --ipc=host \
- -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \
- -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \
- -v /var/log:/var/log \
- {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-
-ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-Restart=always
-RestartSec=30
-
-[Install]
-WantedBy=default.target
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
index bcc8a5e03..ac950b4e5 100644
--- a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
@@ -4,12 +4,12 @@
# * place this file in /etc/systemd/system without the .systemd extension
# * run the commands:
# systemctl daemon-reload
-# systemctl enable zagg-client-docker
-# systemctl start zagg-client-docker
+# systemctl enable oso-rhel7-host-monitoring
+# systemctl start oso-rhel7-host-monitoring
#
#
[Unit]
-Description=Zagg Client Contatainer
+Description=Openshift Host Monitoring Container
Requires=docker.service
After=docker.service
@@ -21,40 +21,54 @@ Environment=HOME=/etc/docker/ops
#Slice=container-small.slice
# systemd syntax '=-' ignore errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_zagg_client }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_zagg_client }}"
+ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
+# mwoodson note 1-7-16:
+# pcp recommends mounting /run in their Dockerfile
+# /run conflicts with cron which also runs in this container.
+# I am leaving /run out for now. The folks in #pcp said that they mounted /run
+# to share the pcp socket that is created in /run. We are not using this,
+# as far as I know.
+# This problem goes away once systemd runs inside the containers and cron is
+# replaced with systemd timers
+# -v /run:/run \
-ExecStart=/usr/bin/docker run --name {{ osohm_zagg_client }} \
+ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} \
--privileged \
--pid=host \
--net=host \
- -e ZAGG_URL={{ osohm_zagg_web_url }} \
- -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
- -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
+ --ipc=host \
+ -e ZAGG_URL={{ osohm_zagg_web_url }} \
+ -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
+ -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
-e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \
- -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
+ -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
-e OSO_CLUSTER_GROUP={{ cluster_group }} \
- -e OSO_CLUSTER_ID={{ oo_clusterid }} \
+ -e OSO_CLUSTER_ID={{ oo_clusterid }} \
+ -e OSO_ENVIRONMENT={{ oo_environment }} \
-e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \
-e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \
+ -e OSO_MASTER_HA={{ osohm_master_ha }} \
-v /etc/localtime:/etc/localtime \
- -v /run/pcp:/run/pcp \
+ -v /sys:/sys:ro \
+ -v /sys/fs/selinux \
+ -v /var/lib/docker:/var/lib/docker:ro \
-v /var/run/docker.sock:/var/run/docker.sock \
- -v /var/run/openvswitch:/var/run/openvswitch \
+ -v /var/run/openvswitch:/var/run/openvswitch \
{% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %}
-v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \
-v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \
-v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \
-v /etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \
{% endif %}
- {{ osohm_docker_registry_url }}{{ osohm_zagg_client }}
+ {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-ExecReload=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
+ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
+ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
Restart=always
RestartSec=30
diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md
new file mode 100644
index 000000000..4215f9eeb
--- /dev/null
+++ b/roles/oso_monitoring_tools/README.md
@@ -0,0 +1,54 @@
+Role Name
+=========
+
+This role will install the Openshift Monitoring Utilities
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+osomt_zagg_client_config
+
+from vars/main.yml:
+
+osomt_zagg_client_config:
+ host:
+ name: "{{ osomt_host_name }}"
+ zagg:
+ url: "{{ osomt_zagg_url }}"
+ user: "{{ osomt_zagg_user }}"
+ pass: "{{ osomt_zagg_password }}"
+ ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+ verbose: "{{ osomt_zagg_verbose }}"
+ debug: "{{ osomt_zagg_debug }}"
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+- role: "oso_monitoring_tools"
+ osomt_host_name: hostname
+ osomt_zagg_url: http://path.to/zagg_web
+ osomt_zagg_user: admin
+ osomt_zagg_password: password
+ osomt_zagg_ssl_verify: True
+ osomt_zagg_verbose: False
+ osomt_zagg_debug: False
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml
new file mode 100644
index 000000000..a17424f25
--- /dev/null
+++ b/roles/oso_monitoring_tools/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml
new file mode 100644
index 000000000..cefa780ab
--- /dev/null
+++ b/roles/oso_monitoring_tools/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml
new file mode 100644
index 000000000..9c42b68dc
--- /dev/null
+++ b/roles/oso_monitoring_tools/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: OpenShift Operations
+ description: Install Openshift Monitoring tools
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml
new file mode 100644
index 000000000..c90fc56e2
--- /dev/null
+++ b/roles/oso_monitoring_tools/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# tasks file for oso_monitoring_tools
+- name: Install the Openshift Tools RPMS
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - openshift-tools-scripts-monitoring-zagg-client
+ - python-openshift-tools-monitoring-zagg
+ - python-openshift-tools-monitoring-zabbix
+
+- debug: var=osomt_zagg_client_config
+
+- name: Generate the /etc/openshift_tools/zagg_client.yaml config file
+ copy:
+ content: "{{ osomt_zagg_client_config | to_nice_yaml }}"
+ dest: /etc/openshift_tools/zagg_client.yaml
+ mode: "644"
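
The copy task renders the role vars through to_nice_yaml and writes the result as the zagg client config. The same effect in plain Python, with placeholder values and a temporary path standing in for /etc/openshift_tools/zagg_client.yaml:

import yaml

# Placeholder values standing in for the osomt_* role variables.
osomt_zagg_client_config = {
    'host': {'name': 'node1.example.com'},
    'zagg': {'url': 'http://zagg.example.com', 'user': 'admin',
             'pass': 'secret', 'ssl_verify': True,
             'verbose': False, 'debug': False},
}

# The role writes the file with mode 0644.
with open('/tmp/zagg_client.yaml', 'w') as cfg:
    yaml.safe_dump(osomt_zagg_client_config, cfg, default_flow_style=False)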
diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml
new file mode 100644
index 000000000..3538ba30b
--- /dev/null
+++ b/roles/oso_monitoring_tools/vars/main.yml
@@ -0,0 +1,12 @@
+---
+# vars file for oso_monitoring_tools
+osomt_zagg_client_config:
+ host:
+ name: "{{ osomt_host_name }}"
+ zagg:
+ url: "{{ osomt_zagg_url }}"
+ user: "{{ osomt_zagg_user }}"
+ pass: "{{ osomt_zagg_password }}"
+ ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+ verbose: "{{ osomt_zagg_verbose }}"
+ debug: "{{ osomt_zagg_debug }}"
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index e9e6e4bd4..08540f440 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -2,8 +2,24 @@
- name: Disable all repositories
command: subscription-manager repos --disable="*"
+- set_fact:
+ default_ose_version: '3.0'
+ when: deployment_type == 'enterprise'
+
+- set_fact:
+ default_ose_version: '3.1'
+ when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
+
+- set_fact:
+ ose_version: "{{ lookup('oo_option', 'ose_version') | default(default_ose_version, True) }}"
+
+- fail:
+ msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
+ when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1'] )
+
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-3.0-rpms"
+ --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index c160ea4e9..eecfd04a0 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -41,4 +41,4 @@
command: subscription-manager subscribe --pool {{ openshift_pool_id.stdout_lines[0] }}
- include: enterprise.yml
- when: deployment_type == 'enterprise'
+ when: deployment_type in [ 'enterprise', 'atomic-enterprise', 'openshift-enterprise' ]
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index c86ba2f4f..1aacf3a4b 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -33,9 +33,7 @@ def is_valid_hostname(hostname):
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
- raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
- 'Please double-check this value i' \
- 'and re-enter it.'.format(hostname))
+ raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
def get_ansible_ssh_user():
click.clear()
@@ -72,7 +70,7 @@ def delete_hosts(hosts):
click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
return hosts, None
-def collect_hosts(oo_cfg, masters_set=False, print_summary=True):
+def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
@@ -129,15 +127,18 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
masters_set = True
host_props['node'] = True
- #TODO: Reenable this option once container installs are out of tech preview
- #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
- # type=click.Choice(['rpm', 'container']),
- # default='rpm')
- #if rpm_or_container == 'container':
- # host_props['containerized'] = True
- #else:
- # host_props['containerized'] = False
host_props['containerized'] = False
+ if oo_cfg.settings['variant_version'] != '3.0':
+ rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ type=click.Choice(['rpm', 'container']),
+ default='rpm')
+ if rpm_or_container == 'container':
+ host_props['containerized'] = True
+
+ if existing_env:
+ host_props['new_host'] = True
+ else:
+ host_props['new_host'] = False
host = Host(**host_props)
@@ -507,7 +508,7 @@ def collect_new_nodes(oo_cfg):
Add new nodes here
"""
click.echo(message)
- return collect_hosts(oo_cfg, masters_set=True, print_summary=False)
+ return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
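
collect_hosts now re-enables the rpm/container prompt for variants newer than 3.0. The prompt in isolation, using the same click API the installer already imports; the wrapper function is illustrative:

import click

def prompt_containerized(variant_version):
    # Container installs stay off the menu for 3.0, exactly as the
    # restored prompt is gated on oo_cfg.settings['variant_version'].
    if variant_version == '3.0':
        return False
    choice = click.prompt(
        'Will this host be RPM or Container based (rpm/container)?',
        type=click.Choice(['rpm', 'container']),
        default='rpm')
    return choice == 'container'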
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 031b82bc1..33ab27567 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -38,6 +38,7 @@ class Host(object):
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
self.preconfigured = kwargs.get('preconfigured', None)
+ self.new_host = kwargs.get('new_host', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
@@ -68,7 +69,8 @@ class Host(object):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
+ 'master', 'node', 'master_lb', 'containerized',
+ 'connect_to', 'preconfigured', 'new_host']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index fd2cd7fbd..c0d115fdc 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -19,13 +19,15 @@ def generate_inventory(hosts):
global CFG
masters = [host for host in hosts if host.master]
nodes = [host for host in hosts if host.node]
+ new_nodes = [host for host in hosts if host.node and host.new_host]
proxy = determine_proxy_configuration(hosts)
multiple_masters = len(masters) > 1
+ scaleup = len(new_nodes) > 0
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
- write_inventory_children(base_inventory, multiple_masters, proxy)
+ write_inventory_children(base_inventory, multiple_masters, proxy, scaleup)
write_inventory_vars(base_inventory, multiple_masters, proxy)
@@ -71,6 +73,11 @@ def generate_inventory(hosts):
base_inventory.write('\n[lb]\n')
write_host(proxy, base_inventory)
+ if scaleup:
+ base_inventory.write('\n[new_nodes]\n')
+ for node in new_nodes:
+ write_host(node, base_inventory)
+
base_inventory.close()
return base_inventory_path
@@ -84,12 +91,14 @@ def determine_proxy_configuration(hosts):
return None
-def write_inventory_children(base_inventory, multiple_masters, proxy):
+def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup):
global CFG
base_inventory.write('\n[OSEv3:children]\n')
base_inventory.write('masters\n')
base_inventory.write('nodes\n')
+ if scaleup:
+ base_inventory.write('new_nodes\n')
if multiple_masters:
base_inventory.write('etcd\n')
if not getattr(proxy, 'preconfigured', True):
@@ -119,6 +128,8 @@ def write_host(host, inventory, schedulable=None):
facts += ' openshift_hostname={}'.format(host.hostname)
if host.public_hostname:
facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+ if host.containerized:
+ facts += ' containerized={}'.format(host.containerized)
# TODO: For now write_host handles both masters and nodes.
# Technically only nodes will ever need this.
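
When any host carries new_host=True, generate_inventory emits a [new_nodes] group and registers it under [OSEv3:children] so scaleup runs can target only the additions. A trimmed illustration of that inventory layout; hostnames and the output path are placeholders:

def write_scaleup_inventory(path, nodes, new_nodes):
    # new_nodes must appear both as a child group and as its own section,
    # otherwise Ansible treats the group reference as undefined.
    with open(path, 'w') as inv:
        inv.write('[OSEv3:children]\nmasters\nnodes\n')
        if new_nodes:
            inv.write('new_nodes\n')
        inv.write('\n[nodes]\n')
        for node in nodes:
            inv.write(node + '\n')
        if new_nodes:
            inv.write('\n[new_nodes]\n')
            for node in new_nodes:
                inv.write(node + '\n')

write_scaleup_inventory('/tmp/hosts', ['node1.example.com'],
                        ['node2.example.com'])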
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index ea380d565..72e8521d0 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -681,9 +681,9 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', False),
- ('10.0.0.3', False)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -722,10 +722,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', False),
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
],
- add_nodes=[('10.0.0.3', False)],
+ add_nodes=[('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -773,9 +773,9 @@ class AttendedCliTests(OOCliFixture):
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
+ ('10.0.0.1', True, False),
],
- add_nodes=[('10.0.0.2', False)],
+ add_nodes=[('10.0.0.2', False, False)],
ssh_user='root',
variant_num=1,
schedulable_masters_ok=True,
@@ -796,10 +796,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', True),
- ('10.0.0.4', False)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -837,9 +837,9 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', True)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -872,10 +872,10 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True),
- ('10.0.0.2', True),
- ('10.0.0.3', False),
- ('10.0.0.4', True)],
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y',
@@ -893,7 +893,7 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True)],
+ ('10.0.0.1', True, False)],
ssh_user='root',
variant_num=1,
confirm_facts='y')
@@ -921,7 +921,7 @@ class AttendedCliTests(OOCliFixture):
run_playbook_mock.return_value = 0
cli_input = build_input(hosts=[
- ('10.0.0.1', True)],
+ ('10.0.0.1', True, False)],
ssh_user='root',
variant_num=2,
confirm_facts='y')
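
The host tuples fed to build_input() are now (host, is_master, is_containerized) triples; every test above passes False for the third element, i.e. an rpm install. A hedged example of exercising the containerized answer instead (addresses illustrative):

    cli_input = build_input(hosts=[
        ('10.0.0.1', True, False),   # master, rpm install
        ('10.0.0.2', False, True)],  # node, containerized install
        ssh_user='root',
        variant_num=1,
        confirm_facts='y')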
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 90bd9e1ef..be759578a 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -138,7 +138,7 @@ class OOCliFixture(OOInstallFixture):
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-#pylint: disable=too-many-arguments,too-many-branches
+#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
master_lb=None):
@@ -163,13 +163,19 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
num_masters = 0
if hosts:
i = 0
- for (host, is_master) in hosts:
+ for (host, is_master, is_containerized) in hosts:
inputs.append(host)
if is_master:
inputs.append('y')
num_masters += 1
else:
inputs.append('n')
+
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+
#inputs.append('rpm')
# We should not be prompted to add more hosts if we're currently at
# 2 masters, this is an invalid HA configuration, so this question
@@ -196,8 +202,12 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
- for (host, is_master) in add_nodes:
+ for (host, is_master, is_containerized) in add_nodes:
inputs.append(host)
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
#inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
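
To make the prompt ordering concrete, a small standalone sketch of the answers build_input() now queues per host, assuming the installer asks for the address, then the master question, then the rpm/container choice, in that order (as the loops above do):

    # Per (host, is_master, is_containerized) tuple the queued answers are:
    #   address -> master y/n -> install type
    inputs = []
    for (host, is_master, is_containerized) in [('10.0.0.3', False, True)]:
        inputs.append(host)
        inputs.append('y' if is_master else 'n')
        inputs.append('container' if is_containerized else 'rpm')
    print(inputs)  # ['10.0.0.3', 'n', 'container']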