From aa30c91de1a1ae50ca087b0cdf9cf12c336c0e17 Mon Sep 17 00:00:00 2001
From: Adam Miller
Date: Oct 20 2016 22:59:10 +0000
Subject: initial working prototype

Signed-off-by: Adam Miller
---

diff --git a/README.rst b/README.rst
index 8ce7217..389ff8f 100644
--- a/README.rst
+++ b/README.rst
@@ -41,3 +41,17 @@ machine) is often frowned upon. The goal here is to allow for the deployment of
 |                                                                          |
 +--------------------------------------------------------------------------+
 
+How to use this
+---------------
+
+You will need to configure two ansible inventory files. The first is for the
+"local" ansible run, which targets the group ``openshift_control``; that file
+is ``inventory/inventory``. The second is the inventory used by the remote
+ansible run on the openshift control machine; that file is
+``files/openshift-cluster-inventory``.
+
+Then you can run the playbook:
+
+::
+
+    ansible-playbook ansible-ansible-openshift-ansible.yml -i inventory/inventory
diff --git a/ansible-ansible-openshift-ansible.yml b/ansible-ansible-openshift-ansible.yml
new file mode 100644
index 0000000..fd87c42
--- /dev/null
+++ b/ansible-ansible-openshift-ansible.yml
@@ -0,0 +1,37 @@
+---
+- name: Run ansible on openshift_control
+  hosts: openshift_control
+  user: root
+  gather_facts: True
+
+  vars:
+    openshift_ansible_tag:
+    openshift_ansible_path: "/tmp/openshift-ansible"
+    openshift_ansible_playbook: "playbooks/byo/config.yml"
+    openshift_ansible_refspec: "openshift-ansible-3.2.35-1"
+    cluster_inventory_filename: "openshift-cluster-inventory"
+
+  tasks:
+    - name: ensure old git clone is removed, we want a clean checkout each run
+      file:
+        path: "{{ openshift_ansible_path }}"
+        state: absent
+
+    - name: git clone the openshift-ansible repo at the pinned release
+      git:
+        repo: "https://github.com/openshift/openshift-ansible.git"
+        dest: "{{ openshift_ansible_path }}"
+        version: "{{ openshift_ansible_refspec }}"
+
+    - name: copy the inventory file
+      copy:
+        src: "files/{{ cluster_inventory_filename }}"
+        dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}"
+
+    - name: run ansible
+      shell: "ansible-playbook {{ openshift_ansible_playbook }} -i {{ cluster_inventory_filename }}"
+      args:
+        chdir: "{{ openshift_ansible_path }}"
+
+
+# vim:ft=ansible:
\ No newline at end of file
diff --git a/files/openshift-cluster-inventory b/files/openshift-cluster-inventory
new file mode 100644
index 0000000..8199761
--- /dev/null
+++ b/files/openshift-cluster-inventory
@@ -0,0 +1,540 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+#nfs
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
+deployment_type=origin
+
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v1.2
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v1.2.0
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-1.2.0
+
+# Install the openshift examples
+#openshift_install_examples=true
+
+# Configure logoutURL in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
+#openshift_master_logout_url=http://example.com
+
+# Configure extensionScripts in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
+
+# Configure extensionStylesheets in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
+#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
+
+# Configure extensions in the master config for console customization
+# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
+#openshift_master_oauth_template=/path/to/login-template.html
+
+# Configure metricsPublicURL in the master config for cluster metrics
+# See: https://docs.openshift.org/latest/install_config/cluster_metrics.html
+#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+# Configure loggingPublicURL in the master config for aggregate logging
+# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html
+#openshift_master_logging_public_url=https://kibana.example.com
+
+# Configure imagePolicyConfig in the master config
+# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
+#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+
+# Docker Configuration
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#openshift_docker_additional_registries=registry.example.com
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# Disable pushing to dockerhub
+#openshift_docker_disable_push_dockerhub=True
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=json-file --log-opt max-size=50m"
+#openshift_docker_options="-l warn --ipv6=false"
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.10.3"
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
+# Alternate image format string. If you're not modifying the format string and
+# only need to inject your own registry you may want to consider
+# openshift_docker_additional_registries instead
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+# If oreg_url points to a registry other than registry.access.redhat.com we can
+# modify image streams to point at that registry by setting the following to true
+#openshift_examples_modify_imagestreams=True
+
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '', 'user2': ''}
+# or
+#openshift_master_htpasswd_file=
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=
+# or
+#openshift_master_ldap_ca_file=
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
+
+# Cloud Provider Configuration
+#
+# Note: You may make use of environment variables rather than store
+# sensitive configuration within the ansible inventory.
+# For example:
+#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+#
+# AWS
+#openshift_cloudprovider_kind=aws
+# Note: IAM profiles may be used instead of storing API credentials on disk.
+#openshift_cloudprovider_aws_access_key=aws_access_key_id
+#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
+#
+# Openstack
+#openshift_cloudprovider_kind=openstack
+#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
+#openshift_cloudprovider_openstack_username=username
+#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_tenant_id=tenant_id
+#openshift_cloudprovider_openstack_tenant_name=tenant_name
+#openshift_cloudprovider_openstack_region=region
+#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure additional projects
+#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
+# default subdomain to use for exposed routes
+#openshift_master_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Override the default pod eviction timeout
+#openshift_master_pod_eviction_timeout=5m
+
+# Override the default oauth tokenConfig settings:
+# openshift_master_access_token_max_seconds=86400
+# openshift_master_auth_token_max_seconds=500
+
+# Override master servingInfo.maxRequestsInFlight
+#openshift_master_max_requests_inflight=500
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
+
+# OpenShift Router Options
+#
+# An OpenShift router will be created during install if there are
+# nodes present with labels matching the default router selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Router selector (optional)
+# Router will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_router_selector='region=infra'
+#
+# Router replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift router selector.
+#openshift_hosted_router_replicas=2
+#
+# Router force subdomain (optional)
+# A router path format to force on all routes used by this router
+# (will ignore the route host value)
+#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
+#
+# Router certificate (optional)
+# Provide local certificate paths which will be configured as the
+# router's default certificate.
+#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
+#
+# Disable management of the OpenShift Router
+#openshift_hosted_manage_router=false
+
+# Openshift Registry Options
+#
+# An OpenShift registry will be created during install if there are
+# nodes present with labels matching the default registry selector,
+# "region=infra". Set openshift_node_labels per node as needed in
+# order to label nodes.
+#
+# Example:
+# [nodes]
+# node.example.com openshift_node_labels="{'region': 'infra'}"
+#
+# Registry selector (optional)
+# Registry will only be created if nodes matching this label are present.
+# Default value: 'region=infra'
+#openshift_hosted_registry_selector='region=infra'
+#
+# Registry replicas (optional)
+# Unless specified, openshift-ansible will calculate the replica count
+# based on the number of nodes matching the openshift registry selector.
+#openshift_hosted_registry_replicas=2
+#
+# Disable management of the OpenShift Registry
+#openshift_hosted_manage_registry=false
+
+# Registry Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/registry"
+#openshift_hosted_registry_storage_kind=nfs
+#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
+#openshift_hosted_registry_storage_host=nfs.example.com
+#openshift_hosted_registry_storage_nfs_directory=/exports
+#openshift_hosted_registry_storage_volume_name=registry
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# Openstack
+# Volume must already exist.
+#openshift_hosted_registry_storage_kind=openstack
+#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_registry_storage_openstack_filesystem=ext4
+#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
+#openshift_hosted_registry_storage_volume_size=10Gi
+#
+# AWS S3
+# S3 bucket must already exist.
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_pullthrough=true
+
+# Deploy metrics
+#openshift_hosted_metrics_deploy=True
+# Metrics Storage Options
+#
+# NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+#
+# External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics"
+#openshift_hosted_metrics_storage_kind=nfs
+#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_metrics_storage_host=nfs.example.com
+#openshift_hosted_metrics_storage_nfs_directory=/exports
+#openshift_hosted_metrics_storage_volume_name=metrics
+#openshift_hosted_metrics_storage_volume_size=10Gi
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Can not be changed
+# after deployment.
+#osm_cluster_network_cidr=10.1.0.0/16
+#openshift_portal_net=172.30.0.0/16
+
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
+# Configure number of bits to allocate to each host’s subnet e.g. 8
+# would mean a /24 network on the host.
+#osm_host_subnet_length=8
+
+# Configure master API and console ports.
+#openshift_master_api_port=8443
+#openshift_master_console_port=8443
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-1.1
+
+# Configure custom ca certificate
+#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
+#
+# NOTE: CA certificate will not be replaced with existing clusters.
+# This option may only be specified when creating a new cluster or
+# when redeploying cluster certificates with the redeploy-certificates
+# playbook. If replacing the CA certificate in an existing cluster
+# with a custom ca certificate, the following variable must also be
+# set.
+#openshift_certificates_redeploy_ca=true
+
+# Configure custom named certificates (SNI certificates)
+#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
+#
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+#
+# An optional CA may be specified for each named certificate. CAs will
+# be added to the OpenShift CA bundle which allows for the named
+# certificate to be served for internal cluster communication.
+#
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_encryption_secrets must be equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# Configure node kubelet arguments
+#openshift_node_kubelet_args={'max-pods': ['110'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
+# Configure logrotate scripts
+# See: https://github.com/nickhammond/ansible-logrotate
+#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
+
+# openshift-ansible will wait indefinitely for your input when it detects that the
+# value of openshift_hostname resolves to an IP address not bound to any local
+# interfaces. This mis-configuration is problematic for any pod leveraging host
+# networking and liveness or readiness probes.
+# Setting this variable to true will override that check.
+#openshift_override_hostname_check=true
+
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+#openshift_use_dnsmasq=False
+# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
+# This is useful for POC environments where DNS may not actually be available yet.
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to the global proxy
+# config values. You only need to set these if they differ from the global settings
+# above. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
+# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
+#openshift_master_dynamic_provisioning_enabled=False
+
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
+# OpenShift Per-Service Environment Variables
+# Environment variables are added to /etc/sysconfig files for
+# each OpenShift service: node, master (api and controllers).
+# API and controllers environment variables are merged in single
+# master environments.
+#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
+#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
+
+# Enable API service auditing, available as of 1.3
+#openshift_master_audit_config={"basicAuditEnabled": true}
+
+# Logging deployment
+#### As per sdodson, still waiting on a refactor here
+
+
+# host group for masters
+[masters]
+origin-master01.example.com
+
+[etcd]
+origin-master01.example.com
+
+[lb]
+origin-master01.example.com
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+origin-master01.example.com openshift_node_labels="{'region':'infra'}" openshift_schedulable=False
+origin-node0[1:2].example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/inventory b/inventory/inventory
new file mode 100644
index 0000000..14c2a80
--- /dev/null
+++ b/inventory/inventory
@@ -0,0 +1,2 @@
+[openshift_control]
+openshift-control.example.com
\ No newline at end of file
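
For reference, the final task in the play above amounts to running the
following by hand on the openshift_control host, with the paths and names
substituted from the play's vars (shown for illustration only, not part of the
patch):

    cd /tmp/openshift-ansible
    ansible-playbook playbooks/byo/config.yml -i openshift-cluster-inventory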
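
Because the knobs are defined as play vars (openshift_ansible_path,
openshift_ansible_playbook, openshift_ansible_refspec,
cluster_inventory_filename), a different openshift-ansible release or entry
playbook can be selected at run time with --extra-vars, which take precedence
over play vars. A hypothetical invocation:

    ansible-playbook ansible-ansible-openshift-ansible.yml -i inventory/inventory \
        -e openshift_ansible_refspec=openshift-ansible-3.2.35-1 \
        -e openshift_ansible_playbook=playbooks/byo/config.yml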