#458 cleanup: remove phx2 from ansible-ansible-openshift-ansible
Merged 3 years ago by kevin. Opened 3 years ago by darknao.
fedora-infra/ansible: darknao/ansible branch cleanup/ansible-openshift into main

@@ -32,15 +32,6 @@ 

    - ansible-ansible-openshift-ansible-config

    when: env == 'staging' and inventory_hostname.startswith('os-')

  

- - name: generate the inventory file (production) (phx2)

-   template:

-     src: "cluster-inventory-prod.j2"

-     dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}"

-   tags:

-   - ansible-ansible-openshift-ansible

-   - ansible-ansible-openshift-ansible-config

-   when: env == 'production' and inventory_hostname.startswith('os-') and datacenter == 'phx2'

- 

  - name: generate the inventory file (production) (iad2)

    template:

      src: "cluster-inventory-iad2-prod.j2"

@@ -536,7 +536,7 @@ 

  {% if env == "staging" %}

  openshift_hosted_registry_storage_kind=nfs

  openshift_hosted_registry_storage_access_modes=['ReadWriteMany']

- openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com

+ openshift_hosted_registry_storage_host=ntap-iad2-c02-fedora01-nfs01a.iad2.fedoraproject.org

  openshift_hosted_registry_storage_nfs_directory=/

  openshift_hosted_registry_storage_volume_name=openshift-stg-registry

  openshift_hosted_registry_storage_volume_size=10Gi
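
With this change applied, the staging branch of the template carries these literal lines (no Jinja2 substitution happens here), so the staging registry now points at the iad2 NetApp export:

    openshift_hosted_registry_storage_kind=nfs
    openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
    openshift_hosted_registry_storage_host=ntap-iad2-c02-fedora01-nfs01a.iad2.fedoraproject.org
    openshift_hosted_registry_storage_nfs_directory=/
    openshift_hosted_registry_storage_volume_name=openshift-stg-registry
    openshift_hosted_registry_storage_volume_size=10Gi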

@@ -1,1166 +0,0 @@ 

- # This is an example of an OpenShift-Ansible host inventory that provides the

- # minimum recommended configuration for production use. This includes three masters,

- # two infra nodes, two compute nodes, and an HAProxy load balancer to load

- # balance traffic to the API servers. For a truly production environment you

- # should use an external load balancing solution that itself is highly available.

- 

- [masters]

- {% for host in groups[openshift_cluster_masters_group] %}

- {% if hostvars[host].datacenter == 'phx2' %}

- {{ host }}

- {% endif %}

- {% endfor %}

- 

- [etcd]

- {% for host in groups[openshift_cluster_masters_group] %}

- {% if hostvars[host].datacenter == 'phx2' %}

- {{ host }}

- {% endif %}

- {% endfor %}

- 

- [nodes]

- {% for host in groups[openshift_cluster_masters_group] %}

- {% if hostvars[host].datacenter == 'phx2' %}

- {{ host }} openshift_node_group_name='node-config-master'

- {% endif %}

- {% endfor %}

- {% for host in groups[openshift_cluster_nodes_group] %}

- {% if hostvars[host].datacenter == 'phx2' %}

- {{ host }} openshift_node_group_name='node-config-compute'

- {% endif %}

- {% endfor %}
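
To illustrate what these loops produced, a sketch of the rendered sections, assuming two hypothetical masters os-master01/02.phx2.fedoraproject.org and one hypothetical compute node os-node01.phx2.fedoraproject.org with datacenter == 'phx2':

    [masters]
    os-master01.phx2.fedoraproject.org
    os-master02.phx2.fedoraproject.org

    [etcd]
    os-master01.phx2.fedoraproject.org
    os-master02.phx2.fedoraproject.org

    [nodes]
    os-master01.phx2.fedoraproject.org openshift_node_group_name='node-config-master'
    os-master02.phx2.fedoraproject.org openshift_node_group_name='node-config-master'
    os-node01.phx2.fedoraproject.org openshift_node_group_name='node-config-compute'

Hosts in other datacenters were filtered out entirely, which is why this phx2-only template could be dropped once the phx2 hosts were gone.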

- 

- #[nfs]

- #ose3-master1.test.example.com

- 

- #[lb]

- #ose3-lb.test.example.com

- 

- # Create an OSEv3 group that contains the masters and nodes groups

- [OSEv3:children]

- masters

- nodes

- etcd

- #lb

- #nfs

- 

- [OSEv3:vars]

- 

- openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
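
The single-line value above is hard to scan; the same structure, reformatted here purely for readability (content identical to the inventory line):

    [
      {'name': 'node-config-master',
       'labels': ['node-role.kubernetes.io/master=true']},
      {'name': 'node-config-infra',
       'labels': ['node-role.kubernetes.io/infra=true']},
      {'name': 'node-config-compute',
       'labels': ['node-role.kubernetes.io/compute=true'],
       'edits': [{'key': 'kubeletArguments.pods-per-core', 'value': ['20']}]}
    ]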

- ###############################################################################

- # Common/ Required configuration variables follow                             #

- ###############################################################################

- # SSH user, this user should allow ssh based auth without requiring a

- # password. If using ssh key based auth, then the key should be managed by an

- # ssh agent.

- ansible_user={{openshift_ansible_ssh_user}}

- 

- # If ansible_user is not root, ansible_become must be set to true and the

- # user must be configured for passwordless sudo

- #ansible_become=yes

- 

- # Specify the deployment type. Valid values are origin and openshift-enterprise.

- #openshift_deployment_type=origin

- openshift_deployment_type={{openshift_deployment_type}}

- 

- # Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we

- # rely on the version running on the first master. Works best for containerized installs where we can usually

- # use this to lookup the latest exact version of the container images, which is the tag actually used to configure

- # the cluster. For RPM installations we just verify the version detected in your configured repos matches this

- # release.

- openshift_release={{openshift_release}}

- 

- {% if openshift_master_ha is defined %}

- {% if openshift_master_ha %}

- # Native high availability cluster method with optional load balancer.

- # If no lb group is defined, the installer assumes that a load balancer has

- # been preconfigured. For installation the value of

- # openshift_master_cluster_hostname must resolve to the load balancer

- # or to one or all of the masters defined in the inventory if no load

- # balancer is present.

- openshift_master_cluster_method=native

- openshift_master_cluster_hostname={{openshift_internal_cluster_url}}

- openshift_master_cluster_public_hostname={{openshift_cluster_url}}

- {% endif %}

- {% endif %}
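
When openshift_master_ha was set, this block pinned the native HA method and the cluster hostnames; a sketch of the rendered output, assuming hypothetical values for the two URL variables:

    openshift_master_cluster_method=native
    openshift_master_cluster_hostname=os-master01.example.com
    openshift_master_cluster_public_hostname=openshift.example.com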

- 

- # default subdomain to use for exposed routes; you should have wildcard dns

- # for *.apps.test.example.com that points at your infra nodes which will run

- # your router

- {% if openshift_app_subdomain is defined %}

- openshift_master_default_subdomain={{openshift_app_subdomain}}

- {% endif %}

- 

- ###############################################################################

- # Additional configuration variables follow                                   #

- ###############################################################################

- 

- # Debug level for all OpenShift components (Defaults to 2)

- debug_level={{openshift_debug_level}}

- 

- # Specify an exact container image tag to install or configure.

- # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.

- # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.

- #openshift_image_tag=v3.10.0

- openshift_image_tag="v3.11"

- 

- # Specify an exact rpm version to install or configure.

- # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.

- # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.

- #openshift_pkg_version=-3.10.0

- openshift_pkg_version="-3.11.200"

- 

- # If using Atomic Host, you may specify system container image registry for the nodes:

- #system_images_registry="docker.io"

- # when openshift_deployment_type=='openshift-enterprise'

- #system_images_registry="registry.access.redhat.com"

- 

- # Manage openshift example imagestreams and templates during install and upgrade

- #openshift_install_examples=true

- {% if openshift_ansible_install_examples is defined %}

- openshift_install_examples={{openshift_ansible_install_examples}}

- {% endif %}

- 

- # Configure logoutURL in the master config for console customization

- # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url

- #openshift_master_logout_url=http://example.com

- 

- # Configure extensions in the master config for console customization

- # See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files

- #openshift_master_oauth_templates={'login': '/path/to/login-template.html'}

- # openshift_master_oauth_template is deprecated.  Use openshift_master_oauth_templates instead.

- #openshift_master_oauth_template=/path/to/login-template.html

- 

- # Configure imagePolicyConfig in the master config

- # See: https://docs.openshift.org/latest/admin_guide/image_policy.html

- #openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}

- 

- # Configure master API rate limits for external clients

- #openshift_master_external_ratelimit_qps=200

- #openshift_master_external_ratelimit_burst=400

- # Configure master API rate limits for loopback clients

- #openshift_master_loopback_ratelimit_qps=300

- #openshift_master_loopback_ratelimit_burst=600

- 

- # Install and run cri-o.

- #openshift_use_crio=False

- #openshift_use_crio_only=False

- {% if openshift_ansible_use_crio is defined %}

- openshift_use_crio={{ openshift_ansible_use_crio }}

- {% endif %}

- {% if openshift_ansible_use_crio_only is defined %}

- openshift_use_crio_only={{ openshift_ansible_use_crio_only }}

- {% endif %}

- # The following two variables are used when openshift_use_crio is True

- # and clean up after builds that pass through docker. When openshift_use_crio is True

- # these variables are set to the defaults shown. You may override them here.

- # NOTE: You will still need to tag crio nodes with your given label(s)!

- # Enable docker garbage collection when using cri-o

- #openshift_crio_enable_docker_gc=True

- # Node Selectors to run the garbage collection

- #openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}

- 

- # Items added, as is, to end of /etc/sysconfig/docker OPTIONS

- # Default value: "--log-driver=journald"

- #openshift_docker_options="-l warn --ipv6=false"

- 

- # Specify exact version of Docker to configure or upgrade to.

- # Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.

- # docker_version="1.12.1"

- 

- # Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.

- # Uncomment below to disable; for example if your kernel does not support the

- # Docker overlay/overlay2 storage drivers with SELinux enabled.

- #openshift_docker_selinux_enabled=False

- 

- # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.

- # docker_upgrade=False

- 

- # Specify a list of block devices to be formatted and mounted on the nodes

- # during prerequisites.yml. For each hash, "device", "path", "filesystem" are

- # required. To add devices only on certain classes of node, redefine

- # container_runtime_extra_storage as a group var.

- #container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]'

- 

- # Enable etcd debug logging, defaults to false

- # etcd_debug=true

- # Set etcd log levels by package

- # etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"

- 

- # Upgrade Hooks

- #

- # Hooks are available to run custom tasks at various points during a cluster

- # upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using

- # absolute paths, if not the path will be treated as relative to the file where the

- # hook is actually used.

- #

- # Tasks to run before each master is upgraded.

- # openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml

- #

- # Tasks to run to upgrade the master. These tasks run after the main openshift-ansible

- # upgrade steps, but before we restart system/services.

- # openshift_master_upgrade_hook=/usr/share/custom/master.yml

- #

- # Tasks to run after each master is upgraded and system/services have been restarted.

- # openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml

- 

- # Cluster Image Source (registry) configuration

- # openshift-enterprise default is 'registry.access.redhat.com/openshift3/ose-${component}:${version}'

- # origin default is 'docker.io/openshift/origin-${component}:${version}'

- #oreg_url=example.com/openshift3/ose-${component}:${version}

- # If oreg_url points to a registry other than registry.access.redhat.com we can

- # modify image streams to point at that registry by setting the following to true

- #openshift_examples_modify_imagestreams=true

- # Add insecure and blocked registries to global docker configuration

- #openshift_docker_insecure_registries=registry.example.com

- #openshift_docker_blocked_registries=registry.hacker.com

- # You may also configure additional default registries for docker, however this

- # is discouraged. Instead you should make use of fully qualified image names.

- #openshift_docker_additional_registries=registry.example.com

- 

- # If oreg_url points to a registry requiring authentication, provide the following:

- oreg_auth_user="{{ os_prod_registry_user }}"

- oreg_auth_password="{{ os_prod_registry_password }}"

- # NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.

- # oreg_auth_password should be generated from running docker login.

- # To update registry auth credentials, uncomment the following:

- #oreg_auth_credentials_replace=True

- 

- # OpenShift repository configuration

- #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]

- #openshift_repos_enable_testing=false

- 

- # If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. in

- # a disconnected and containerized installation, use osm_etcd_image to specify the image to use:

- #osm_etcd_image=rhel7/etcd

- 

- # htpasswd auth

- #openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]

- # Defining htpasswd users

- #openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}

- # or

- #openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>

- 

- {% if openshift_auth_profile == "osbs" %}

- openshift_master_manage_htpasswd=false

- openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift_htpasswd_file }}'}]

- {% endif %}

- 

- {% if openshift_auth_profile == "fedoraidp" %}

- openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]

- {% endif %}

- 

- {% if openshift_auth_profile == "fedoraidp-stg" %}

- openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]

- {% endif %}

- 

- # Allow all auth

- #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]

- 

- # LDAP auth

- #openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]

- #

- # Configure LDAP CA certificate

- # Specify either the ASCII contents of the certificate or the path to

- # the local file that will be copied to the remote host. CA

- # certificate contents will be copied to master systems and saved

- # within /etc/origin/master/ with a filename matching the "ca" key set

- # within the LDAPPasswordIdentityProvider.

- #

- #openshift_master_ldap_ca=<ca text>

- # or

- #openshift_master_ldap_ca_file=<path to local ca file to use>

- 

- # OpenID auth

- #openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]

- #

- # Configure OpenID CA certificate

- # Specify either the ASCII contents of the certificate or the path to

- # the local file that will be copied to the remote host. CA

- # certificate contents will be copied to master systems and saved

- # within /etc/origin/master/ with a filename matching the "ca" key set

- # within the OpenIDIdentityProvider.

- #

- #openshift_master_openid_ca=<ca text>

- # or

- #openshift_master_openid_ca_file=<path to local ca file to use>

- 

- # Request header auth

- #openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]

- #

- # Configure request header CA certificate

- # Specify either the ASCII contents of the certificate or the path to

- # the local file that will be copied to the remote host. CA

- # certificate contents will be copied to master systems and saved

- # within /etc/origin/master/ with a filename matching the "clientCA"

- # key set within the RequestHeaderIdentityProvider.

- #

- #openshift_master_request_header_ca=<ca text>

- # or

- #openshift_master_request_header_ca_file=<path to local ca file to use>

- 

- # CloudForms Management Engine (ManageIQ) App Install

- #

- # Enables installation of MIQ server. Recommended for dedicated

- # clusters only. See roles/openshift_management/README.md for instructions

- # and requirements.

- #openshift_management_install_management=False

- 

- # Cloud Provider Configuration

- #

- # Note: You may make use of environment variables rather than store

- # sensitive configuration within the ansible inventory.

- # For example:

- #openshift_cloudprovider_aws_access_key="{ lookup('env','AWS_ACCESS_KEY_ID') }"

- #openshift_cloudprovider_aws_secret_key="{ lookup('env','AWS_SECRET_ACCESS_KEY') }"

- #

- # AWS

- #openshift_cloudprovider_kind=aws

- # Note: IAM profiles may be used instead of storing API credentials on disk.

- #openshift_cloudprovider_aws_access_key=aws_access_key_id

- #openshift_cloudprovider_aws_secret_key=aws_secret_access_key

- #

- # Openstack

- #openshift_cloudprovider_kind=openstack

- #openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/

- #openshift_cloudprovider_openstack_username=username

- #openshift_cloudprovider_openstack_password=password

- #openshift_cloudprovider_openstack_domain_id=domain_id

- #openshift_cloudprovider_openstack_domain_name=domain_name

- #openshift_cloudprovider_openstack_tenant_id=tenant_id

- #openshift_cloudprovider_openstack_tenant_name=tenant_name

- #openshift_cloudprovider_openstack_region=region

- #openshift_cloudprovider_openstack_lb_subnet_id=subnet_id

- #

- # Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting

- #openshift_cloudprovider_openstack_blockstorage_version=v2

- #

- # GCE

- #openshift_cloudprovider_kind=gce

- # Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be

- # defined.

- # openshift_gcp_project is the project-id

- #openshift_gcp_project=

- # openshift_gcp_prefix is a unique string to identify each openshift cluster.

- #openshift_gcp_prefix=

- #openshift_gcp_multizone=False

- # Note: To enable nested virtualization in gcp use the following variable and url

- #openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"

- # Additional details regarding nested virtualization are available:

- # https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances

- #

- # vSphere

- #openshift_cloudprovider_kind=vsphere

- #openshift_cloudprovider_vsphere_username=username

- #openshift_cloudprovider_vsphere_password=password

- #openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host

- #openshift_cloudprovider_vsphere_datacenter=datacenter

- #openshift_cloudprovider_vsphere_datastore=datastore

- #openshift_cloudprovider_vsphere_folder=optional_folder_name

- 

- 

- # Project Configuration

- #osm_project_request_message=''

- #osm_project_request_template=''

- #osm_mcs_allocator_range='s0:/2'

- #osm_mcs_labels_per_project=5

- #osm_uid_allocator_range='1000000000-1999999999/10000'

- 

- # Configure additional projects

- #openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}

- 

- # Enable cockpit

- #osm_use_cockpit=true

- #

- # Set cockpit plugins

- #osm_cockpit_plugins=['cockpit-kubernetes']

- 

- # If an external load balancer is used public hostname should resolve to

- # external load balancer address

- #openshift_master_cluster_public_hostname=openshift-ansible.public.example.com

- 

- # Configure controller arguments

- #osm_controller_args={'resource-quota-sync-period': ['10s']}

- 

- # Configure api server arguments

- #osm_api_server_args={'max-requests-inflight': ['400']}

- 

- # additional cors origins

- #osm_custom_cors_origins=['foo.example.com', 'bar.example.com']

- 

- # default project node selector

- #osm_default_node_selector='region=primary'

- 

- # Override the default pod eviction timeout

- #openshift_master_pod_eviction_timeout=5m

- 

- # Override the default oauth tokenConfig settings:

- # openshift_master_access_token_max_seconds=86400

- # openshift_master_auth_token_max_seconds=500

- 

- # Override master servingInfo.maxRequestsInFlight

- #openshift_master_max_requests_inflight=500

- 

- # Override master and node servingInfo.minTLSVersion and .cipherSuites

- # valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12

- # example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants

- #openshift_master_min_tls_version=VersionTLS12

- #openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']

- #

- #openshift_node_min_tls_version=VersionTLS12

- #openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']

- 

- # default storage plugin dependencies to install, by default the ceph and

- # glusterfs plugin dependencies will be installed, if available.

- #osn_storage_plugin_deps=['ceph','glusterfs','iscsi']

- 

- # OpenShift Router Options

- #

- # An OpenShift router will be created during install if there are

- # nodes present with labels matching the default router selector,

- # "node-role.kubernetes.io/infra=true".

- #

- # Example:

- # [nodes]

- # node.example.com openshift_node_group_name="node-config-infra"

- #

- # Router selector (optional)

- # Router will only be created if nodes matching this label are present.

- # Default value: 'node-role.kubernetes.io/infra=true'

- #openshift_hosted_router_selector='node-role.kubernetes.io/infra=true'

- #

- # Router replicas (optional)

- # Unless specified, openshift-ansible will calculate the replica count

- # based on the number of nodes matching the openshift router selector.

- #openshift_hosted_router_replicas=2

- #

- # Router force subdomain (optional)

- # A router path format to force on all routes used by this router

- # (will ignore the route host value)

- #openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'

- #

- # Router certificate (optional)

- # Provide local certificate paths which will be configured as the

- # router's default certificate.

- #openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}

- #

- # Manage the OpenShift Router (optional)

- #openshift_hosted_manage_router=true

- #

- # Router sharding support has been added and can be achieved by supplying the correct

- # data to the inventory.  The variable to house the data is openshift_hosted_routers

- # and is in the form of a list.  If no data is passed then a default router will be

- # created.  There are multiple combinations of router sharding.  The one described

- # below supports routers on separate nodes.

- #

- #openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]

- 

- # OpenShift Registry Console Options

- # Override the console image prefix:

- # origin default is "cockpit/", enterprise default is "openshift3/"

- #openshift_cockpit_deployer_prefix=registry.example.com/myrepo/

- # origin default is "kubernetes", enterprise default is "registry-console"

- #openshift_cockpit_deployer_basename=my-console

- # Override image version, defaults to latest for origin, vX.Y product version for enterprise

- #openshift_cockpit_deployer_version=1.4.1

- 

- # Openshift Registry Options

- #

- # An OpenShift registry will be created during install if there are

- # nodes present with labels matching the default registry selector,

- # "node-role.kubernetes.io/infra=true".

- #

- # Example:

- # [nodes]

- # node.example.com openshift_node_group_name="node-config-infra"

- #

- # Registry selector (optional)

- # Registry will only be created if nodes matching this label are present.

- # Default value: 'node-role.kubernetes.io/infra=true'

- #openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'

- #

- # Registry replicas (optional)

- # Unless specified, openshift-ansible will calculate the replica count

- # based on the number of nodes matching the openshift registry selector.

- #openshift_hosted_registry_replicas=2

- #

- # Validity of the auto-generated certificate in days (optional)

- #openshift_hosted_registry_cert_expire_days=730

- #

- # Manage the OpenShift Registry (optional)

- #openshift_hosted_manage_registry=true

- # Manage the OpenShift Registry Console (optional)

- #openshift_hosted_manage_registry_console=true

- #

- # Registry Storage Options

- #

- # NFS Host Group

- # An NFS volume will be created with path "nfs_directory/volume_name"

- # on the host within the [nfs] host group.  For example, the volume

- # path using these options would be "/exports/registry".  "exports" is

- # the name of the export served by the nfs server.  "registry" is

- # the name of a directory inside of "/exports".

- #openshift_hosted_registry_storage_kind=nfs

- #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']

- # nfs_directory must conform to DNS-1123 subdomain rules: it must consist of lower case

- # alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character

- #openshift_hosted_registry_storage_nfs_directory=/exports

- #openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'

- #openshift_hosted_registry_storage_volume_name=registry

- #openshift_hosted_registry_storage_volume_size=10Gi

- #

- # External NFS Host

- # NFS volume must already exist with path "nfs_directory/_volume_name" on

- # the storage_host. For example, the remote volume path using these

- # options would be "nfs.example.com:/exports/registry".  "exports" is

- # the name of the export served by the nfs server.  "registry" is

- # the name of a directory inside of "/exports".

- #openshift_hosted_registry_storage_kind=nfs

- #openshift_hosted_registry_storage_access_modes=['ReadWriteMany']

- #openshift_hosted_registry_storage_host=nfs.example.com

- # nfs_directory must conform to DNS-1123 subdomain rules: it must consist of lower case

- # alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character

- #openshift_hosted_registry_storage_nfs_directory=/exports

- #openshift_hosted_registry_storage_volume_name=registry

- #openshift_hosted_registry_storage_volume_size=10Gi

- {% if env == "staging" %}

- openshift_hosted_registry_storage_kind=nfs

- openshift_hosted_registry_storage_access_modes=['ReadWriteMany']

- openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com

- openshift_hosted_registry_storage_nfs_directory=/

- openshift_hosted_registry_storage_volume_name=openshift-stg-registry

- openshift_hosted_registry_storage_volume_size=10Gi

- {% else %}

- openshift_hosted_registry_storage_kind=nfs

- openshift_hosted_registry_storage_access_modes=['ReadWriteMany']

- openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com

- openshift_hosted_registry_storage_nfs_directory=/

- openshift_hosted_registry_storage_volume_name=openshift-prod-registry

- openshift_hosted_registry_storage_volume_size=10Gi

- {% endif %}

- #

- # Openstack

- # Volume must already exist.

- #openshift_hosted_registry_storage_kind=openstack

- #openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']

- #openshift_hosted_registry_storage_openstack_filesystem=ext4

- #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57

- #openshift_hosted_registry_storage_volume_size=10Gi

- #

- # hostPath (local filesystem storage)

- # Suitable for "all-in-one" or proof of concept deployments

- # Must not be used for high-availability and production deployments

- #openshift_hosted_registry_storage_kind=hostpath

- #openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']

- #openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes

- #openshift_hosted_registry_storage_volume_size=10Gi

- #

- # AWS S3

- # S3 bucket must already exist.

- #openshift_hosted_registry_storage_kind=object

- #openshift_hosted_registry_storage_provider=s3

- #openshift_hosted_registry_storage_s3_encrypt=false

- #openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id

- #openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id

- #openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key

- #openshift_hosted_registry_storage_s3_bucket=bucket_name

- #openshift_hosted_registry_storage_s3_region=bucket_region

- #openshift_hosted_registry_storage_s3_chunksize=26214400

- #openshift_hosted_registry_storage_s3_rootdirectory=/registry

- #openshift_hosted_registry_pullthrough=true

- #openshift_hosted_registry_acceptschema2=true

- #openshift_hosted_registry_enforcequota=true

- #

- # Any S3 service (Minio, ExoScale, ...): Basically the same as above

- # but with regionendpoint configured

- # S3 bucket must already exist.

- #openshift_hosted_registry_storage_kind=object

- #openshift_hosted_registry_storage_provider=s3

- #openshift_hosted_registry_storage_s3_accesskey=access_key_id

- #openshift_hosted_registry_storage_s3_secretkey=secret_access_key

- #openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/

- #openshift_hosted_registry_storage_s3_bucket=bucket_name

- #openshift_hosted_registry_storage_s3_region=bucket_region

- #openshift_hosted_registry_storage_s3_chunksize=26214400

- #openshift_hosted_registry_storage_s3_rootdirectory=/registry

- #openshift_hosted_registry_pullthrough=true

- #openshift_hosted_registry_acceptschema2=true

- #openshift_hosted_registry_enforcequota=true

- #

- # Additional CloudFront Options. When using CloudFront all three

- # of the following variables must be defined.

- #openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/

- #openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem

- #openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid

- # vSphere Volume with vSphere Cloud Provider

- # openshift_hosted_registry_storage_kind=vsphere

- # openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']

- # openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume']

- #

- # GCS Storage Bucket

- #openshift_hosted_registry_storage_provider=gcs

- #openshift_hosted_registry_storage_gcs_bucket=bucket01

- #openshift_hosted_registry_storage_gcs_keyfile=test.key

- #openshift_hosted_registry_storage_gcs_rootdirectory=/registry

- 

- # Metrics deployment

- # See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html

- #

- # By default metrics are not automatically deployed, set this to enable them

- openshift_metrics_install_metrics=true

- openshift_metrics_cassandra_storage_type=emptydir

- openshift_metrics_start_cluster=true

- openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"}

- {% if openshift_metrics_deploy is defined %}

- {% if openshift_metrics_deploy %}

- openshift_hosted_metrics_deploy=true

- {% endif %}

- {% endif %}

- #

- # Storage Options

- # If openshift_metrics_storage_kind is unset then metrics will be stored

- # in an EmptyDir volume and will be deleted when the cassandra pod terminates.

- # Storage options A & B currently support only one cassandra pod which is

- # generally enough for up to 1000 pods. Additional volumes can be created

- # manually after the fact and metrics scaled per the docs.

- #

- # Option A - NFS Host Group

- # An NFS volume will be created with path "nfs_directory/volume_name"

- # on the host within the [nfs] host group.  For example, the volume

- # path using these options would be "/exports/metrics".  "exports" is

- # the name of the export served by the nfs server.  "metrics" is

- # the name of a directory inside of "/exports".

- #openshift_metrics_storage_kind=nfs

- #openshift_metrics_storage_access_modes=['ReadWriteOnce']

- #openshift_metrics_storage_nfs_directory=/exports

- #openshift_metrics_storage_nfs_options='*(rw,root_squash)'

- #openshift_metrics_storage_volume_name=metrics

- #openshift_metrics_storage_volume_size=10Gi

- #openshift_metrics_storage_labels={'storage': 'metrics'}

- #

- # Option B - External NFS Host

- # NFS volume must already exist with path "nfs_directory/_volume_name" on

- # the storage_host. For example, the remote volume path using these

- # options would be "nfs.example.com:/exports/metrics".  "exports" is

- # the name of the export served by the nfs server.  "metrics" is

- # the name of a directory inside of "/exports".

- #openshift_metrics_storage_kind=nfs

- #openshift_metrics_storage_access_modes=['ReadWriteOnce']

- #openshift_metrics_storage_host=nfs.example.com

- #openshift_metrics_storage_nfs_directory=/exports

- #openshift_metrics_storage_volume_name=metrics

- #openshift_metrics_storage_volume_size=10Gi

- #openshift_metrics_storage_labels={'storage': 'metrics'}

- #

- # Option C - Dynamic -- If openshift supports dynamic volume provisioning for

- # your cloud platform use this.

- #openshift_metrics_storage_kind=dynamic

- #

- # Other Metrics Options -- Common items you may wish to reconfigure, for the complete

- # list of options please see roles/openshift_metrics/README.md

- #

- # Override metricsPublicURL in the master config for cluster metrics

- # Defaults to https://hawkular-metrics.{openshift_master_default_subdomain}/hawkular/metrics

- # Currently, you may only alter the hostname portion of the URL; altering the

- # `/hawkular/metrics` path will break installation of metrics.

- #openshift_metrics_hawkular_hostname=hawkular-metrics.example.com

- # Configure the metrics component images. Note: these will be modified by oreg_url by default

- #openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{ openshift_image_tag }"

- #openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{ openshift_image_tag }"

- #openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{ openshift_image_tag }"

- #openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{ openshift_image_tag }"

- #openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{ openshift_image_tag }"

- # when openshift_deployment_type=='openshift-enterprise'

- #openshift_metrics_cassandra_image="registry.access.redhat.com/openshift3/metrics-cassandra:{ openshift_image_tag }"

- #openshift_metrics_hawkular_agent_image="registry.access.redhat.com/openshift3/metrics-hawkular-openshift-agent:{ openshift_image_tag }"

- #openshift_metrics_hawkular_metrics_image="registry.access.redhat.com/openshift3/metrics-hawkular-metrics:{ openshift_image_tag }"

- #openshift_metrics_schema_installer_image="registry.access.redhat.com/openshift3/metrics-schema-installer:{ openshift_image_tag }"

- #openshift_metrics_heapster_image="registry.access.redhat.com/openshift3/metrics-heapster:{ openshift_image_tag }"

- #

- # StorageClass

- # openshift_storageclass_name=gp2

- # openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}

- # openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777']

- # openshift_storageclass_reclaim_policy="Delete"

- #

- # PersistentLocalStorage

- # If Persistent Local Storage is wanted, this boolean can be defined to True.

- # This will create all necessary configuration to use persistent storage on nodes.

- #openshift_persistentlocalstorage_enabled=False

- #openshift_persistentlocalstorage_classes=[]

- #openshift_persistentlocalstorage_path=/mnt/local-storage

- #openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1

- 

- # Logging deployment

- #

- # Currently logging deployment is disabled by default, enable it by setting this

- openshift_logging_install_logging=true

- openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"}

- #

- # Logging storage config

- # Option A - NFS Host Group

- # An NFS volume will be created with path "nfs_directory/volume_name"

- # on the host within the [nfs] host group.  For example, the volume

- # path using these options would be "/exports/logging".  "exports" is

- # the name of the export served by the nfs server.  "logging" is

- # the name of a directory inside of "/exports".

- #openshift_logging_storage_kind=nfs

- #openshift_logging_storage_access_modes=['ReadWriteOnce']

- #openshift_logging_storage_nfs_directory=/exports

- #openshift_logging_storage_nfs_options='*(rw,root_squash)'

- #openshift_logging_storage_volume_name=logging

- #openshift_logging_storage_volume_size=10Gi

- #openshift_logging_storage_labels={'storage': 'logging'}

- #

- # Option B - External NFS Host

- # NFS volume must already exist with path "nfs_directory/_volume_name" on

- # the storage_host. For example, the remote volume path using these

- # options would be "nfs.example.com:/exports/logging".  "exports" is

- # the name of the export served by the nfs server.  "logging" is

- # the name of a directory inside of "/exports".

- #openshift_logging_storage_kind=nfs

- #openshift_logging_storage_access_modes=['ReadWriteOnce']

- #openshift_logging_storage_host=nfs.example.com

- #openshift_logging_storage_nfs_directory=/exports

- #openshift_logging_storage_volume_name=logging

- #openshift_logging_storage_volume_size=10Gi

- #openshift_logging_storage_labels={'storage': 'logging'}

- openshift_logging_storage_kind=nfs

- openshift_logging_storage_access_modes=['ReadWriteOnce']

- openshift_logging_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com

- openshift_logging_storage_nfs_directory=/

- openshift_logging_storage_volume_name=openshift-prod-logging

- openshift_logging_storage_volume_size=100Gi

- #

- # Option C - Dynamic -- If openshift supports dynamic volume provisioning for

- # your cloud platform use this.

- #openshift_logging_storage_kind=dynamic

- #

- # Option D - none -- Logging will use emptydir volumes which are destroyed when

- # pods are deleted

- #

- # Other Logging Options -- Common items you may wish to reconfigure, for the complete

- # list of options please see roles/openshift_logging/README.md

- #

- # Configure loggingPublicURL in the master config for aggregate logging, defaults

- # to kibana.{ openshift_master_default_subdomain }

- #openshift_logging_kibana_hostname=logging.apps.example.com

- # Configure the number of elastic search nodes, unless you're using dynamic provisioning

- # this value must be 1

- openshift_logging_es_cluster_size=1

- 

- # Prometheus deployment

- #

- # Currently prometheus deployment is disabled by default, enable it by setting this

- #openshift_hosted_prometheus_deploy=true

- #

- # Prometheus storage config

- # By default prometheus uses emptydir storage, if you want to persist you should

- # configure it to use pvc storage type. Each volume must be ReadWriteOnce.

- #openshift_prometheus_storage_type=emptydir

- #openshift_prometheus_alertmanager_storage_type=emptydir

- #openshift_prometheus_alertbuffer_storage_type=emptydir

- # Use PVCs for persistence

- #openshift_prometheus_storage_type=pvc

- #openshift_prometheus_alertmanager_storage_type=pvc

- #openshift_prometheus_alertbuffer_storage_type=pvc

- 

- # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')

- os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'

- 

- # Disable the OpenShift SDN plugin

- # openshift_use_openshift_sdn=False

- 

- # Configure SDN cluster network and kubernetes service CIDR blocks. These

- # network blocks should be private and should not conflict with network blocks

- # in your infrastructure that pods may require access to. Cannot be changed

- # after deployment.

- #

- # WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of

- # 172.17.0.0/16.  Your installation will fail and/or your configuration change will

- # cause the Pod SDN or Cluster SDN to fail.

- #

- # WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting

- # docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS

- # environment variable located in /etc/sysconfig/docker-network.

- # When upgrading or scaling up the following must match what's in your master config!

- #  Inventory: master yaml field

- #  osm_cluster_network_cidr: clusterNetworkCIDR

- #  openshift_portal_net: serviceNetworkCIDR

- # When installing osm_cluster_network_cidr and openshift_portal_net must be set.

- # Sane examples are provided below.

- #osm_cluster_network_cidr=10.128.0.0/14

- #openshift_portal_net=172.30.0.0/16

- 

- # ExternalIPNetworkCIDRs controls what values are acceptable for the

- # service external IP field. If empty, no externalIP may be set. It

- # may contain a list of CIDRs which are checked for access. If a CIDR

- # is prefixed with !, IPs in that CIDR will be rejected. Rejections

- # will be applied first, then the IP checked against one of the

- # allowed CIDRs. You should ensure this range does not overlap with

- # your nodes, pods, or service CIDRs for security reasons.

- #openshift_master_external_ip_network_cidrs=['0.0.0.0/0']

- 

- # IngressIPNetworkCIDR controls the range to assign ingress IPs from for

- # services of type LoadBalancer on bare metal. If empty, ingress IPs will not

- # be assigned. It may contain a single CIDR that will be allocated from. For

- # security reasons, you should ensure that this range does not overlap with

- # the CIDRs reserved for external IPs, nodes, pods, or services.

- #openshift_master_ingress_ip_network_cidr=172.46.0.0/16

- 

- # Configure number of bits to allocate to each host's subnet e.g. 9

- # would mean a /23 network on the host.

- # When upgrading or scaling up the following must match what's in your master config!

- #  Inventory: master yaml field

- #  osm_host_subnet_length:  hostSubnetLength

- # When installing osm_host_subnet_length must be set. A sane example is provided below.

- #osm_host_subnet_length=9

- 

- # Configure master API and console ports.

- #openshift_master_api_port=8443

- #openshift_master_console_port=8443

- {% if openshift_api_port is defined and openshift_console_port is defined %}

- {% if openshift_api_port and openshift_console_port %}

- openshift_master_api_port={{openshift_api_port}}

- openshift_master_console_port={{openshift_console_port}}

- {% endif %}

- {% endif %}

- 

- # set exact RPM version (include - prefix)

- #openshift_pkg_version=-3.9.0

- # you may also specify version and release, ie:

- #openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7

- 

- # Configure custom ca certificate

- #openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}

- #

- # NOTE: CA certificate will not be replaced with existing clusters.

- # This option may only be specified when creating a new cluster or

- # when redeploying cluster certificates with the redeploy-certificates

- # playbook.

- 

- # Configure custom named certificates (SNI certificates)

- #

- # https://docs.openshift.org/latest/install_config/certificate_customization.html

- # https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html

- #

- # NOTE: openshift_master_named_certificates is cached on masters and is an

- # additive fact, meaning that each run with a different set of certificates

- # will add the newly provided certificates to the cached set of certificates.

- #

- # An optional CA may be specified for each named certificate. CAs will

- # be added to the OpenShift CA bundle which allows for the named

- # certificate to be served for internal cluster communication.

- #

- # If you would like openshift_master_named_certificates to be overwritten with

- # the provided value, specify openshift_master_overwrite_named_certificates.

- #openshift_master_overwrite_named_certificates=true

- #

- # Provide local certificate paths which will be deployed to masters

- #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]

- #

- # Detected names may be overridden by specifying the "names" key

- #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]

- #

- # Add a trusted CA to all pods, copies from the control host, may be multiple

- # certs in one file

- #openshift_additional_ca=/path/to/additional-ca.crt

- 

- # Session options

- #openshift_master_session_name=ssn

- #openshift_master_session_max_seconds=3600

- 

- # An authentication and encryption secret will be generated if secrets

- # are not provided. If provided, openshift_master_session_auth_secrets

- # and openshift_master_session_encryption_secrets must be of equal length.

- #

- # Signing secrets, used to authenticate sessions using

- # HMAC. Recommended to use secrets with 32 or 64 bytes.

- #openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']

- #

- # Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32

- # characters long, to select AES-128, AES-192, or AES-256.

- #openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']

- 

- # configure how often node iptables rules are refreshed

- #openshift_node_iptables_sync_period=5s

- 

- # Configure nodeIP in the node config

- # This is needed in cases where node traffic is desired to go over an

- # interface other than the default network interface.

- #openshift_set_node_ip=True

- 

- #openshift_node_kubelet_args is deprecated, use node config edits instead

- 

- # Configure logrotate scripts

- # See: https://github.com/nickhammond/ansible-logrotate

- #logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]

- 

- # The OpenShift-Ansible installer will fail when it detects that the

- # value of openshift_hostname resolves to an IP address not bound to any local

- # interfaces. This mis-configuration is problematic for any pod leveraging host

- # networking and liveness or readiness probes.

- # Setting this variable to false will override that check.

- #openshift_hostname_check=true

- 

- # openshift_use_dnsmasq is deprecated.  This must be true, or installs will fail

- # in versions >= 3.6

- #openshift_use_dnsmasq=False

- 

- # Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf

- # This is useful for POC environments where DNS may not actually be available yet or to set

- # options like 'strict-order' to alter dnsmasq configuration.

- #openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf

- 

- # Global Proxy Configuration

- # These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment

- # variables for docker and master services.

- #

- # Hosts in the openshift_no_proxy list will NOT use any globally

- # configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains

- # (.example.com), hosts (example.com), and IP addresses.

- #openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT

- #openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT

- #openshift_no_proxy='.hosts.example.com,some-host.com'

- #

- # Most environments don't require a proxy between openshift masters, nodes, and

- # etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.

- # If all of your hosts share a common domain you may wish to disable this and

- # specify that domain above instead.

- #

- # For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and

- # n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy

- # variable (above) and set this value to False

- #openshift_generate_no_proxy_hosts=True

- #

- # These options configure the BuildDefaults admission controller which injects

- # configuration into Builds. Proxy related values will default to the global proxy

- # config values. You only need to set these if they differ from the global proxy settings.

- # See BuildDefaults documentation at

- # https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html

- #openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT

- #openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT

- #openshift_builddefaults_no_proxy=mycorp.com

- #openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT

- #openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT

- #openshift_builddefaults_git_no_proxy=mycorp.com

- #openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]

- #openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}

- #openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}

- #openshift_builddefaults_resources_requests_cpu=100m

- #openshift_builddefaults_resources_requests_memory=256Mi

- #openshift_builddefaults_resources_limits_cpu=1000m

- #openshift_builddefaults_resources_limits_memory=512Mi

- 

- # Or you may optionally define your own build defaults configuration serialized as json

- #openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'

- 

- # These options configure the BuildOverrides admission controller which injects

- # configuration into Builds.

- # See BuildOverrides documentation at

- # https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html

- #openshift_buildoverrides_force_pull=true

- #openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]

- #openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}

- #openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}

- #openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]

- 

- # Or you may optionally define your own build overrides configuration serialized as json

- #openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'

- 

- # Enable service catalog

- openshift_enable_service_catalog=true

- 

- # Enable template service broker (requires service catalog to be enabled, above)

- template_service_broker_install=true

- 

- # Specify an openshift_service_catalog image

- # (defaults for origin and openshift-enterprise, respectively)

- #openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{ openshift_image_tag }"

- openshift_service_catalog_image="registry.access.redhat.com/openshift3/ose-service-catalog:v3.11.200"

- 

- # TSB image tag

- template_service_broker_version='v3.11.200'

- 

- # Configure one of more namespaces whose templates will be served by the TSB

- openshift_template_service_broker_namespaces=['openshift']

- 

- # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default

- #openshift_master_dynamic_provisioning_enabled=True

- 

- # Admission plugin config

- #openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}

- 

- # Configure usage of openshift_clock role.

- openshift_clock_enabled=true

- 

- # OpenShift Per-Service Environment Variables

- # Environment variables are added to /etc/sysconfig files for

- # each OpenShift node.

- # API and controllers environment variables are merged in single

- # master environments.

- #openshift_node_env_vars={"ENABLE_HTTP2": "true"}

- {% if no_http2 is defined %}

- {% if no_http2 %}

- openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}

- openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}

- openshift_node_env_vars={"ENABLE_HTTP2": "true"}

- {% endif %}

- {% endif %}

- 

- # Enable API service auditing

- #openshift_master_audit_config={"enabled": "true"}

- #

- # In case you want more advanced setup for the auditlog you can

- # use this line.

- # The directory in "auditFilePath" will be created if it does not

- # exist

- #openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"}

- 

- # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used

- # by openshift_deployment_type=origin

- #openshift_enable_origin_repo=false

- 

- # Validity of the auto-generated OpenShift certificates in days.

- # See also openshift_hosted_registry_cert_expire_days above.

- #

- #openshift_ca_cert_expire_days=1825

- #openshift_node_cert_expire_days=730

- #openshift_master_cert_expire_days=730

- 

- # Validity of the auto-generated external etcd certificates in days.

- # Controls validity for etcd CA, peer, server and client certificates.

- #

- #etcd_ca_default_days=1825

- #

- # ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference

- # openshift_master_saconfig_limitsecretreferences=false

- 

- # Upgrade Control

- #

- # By default nodes are upgraded in a serial manner, one at a time, and all

- # failures are fatal. There is one set of variables for normal nodes and one

- # for nodes that are part of the control plane, as the number of hosts may

- # differ between those two groups.

- #openshift_upgrade_nodes_serial=1

- #openshift_upgrade_nodes_max_fail_percentage=0

- #openshift_upgrade_control_plane_nodes_serial=1

- #openshift_upgrade_control_plane_nodes_max_fail_percentage=0

- #

- # You can specify the number of nodes to upgrade at once. We do not currently

- # attempt to verify that you have capacity to drain this many nodes at once

- # so please be careful when specifying these values. You should also verify that

- # the expected number of nodes are all schedulable and ready before starting an

- # upgrade. If it's not possible to drain the requested nodes the upgrade will

- # stall indefinitely until the drain is successful.

- #

- # If you're upgrading more than one node at a time you can specify the maximum

- # percentage of failure within the batch before the upgrade is aborted. Any

- # nodes that do fail are ignored for the rest of the playbook run and you should

- # take care to investigate the failure and return the node to service so that

- # your cluster returns to full capacity.

- #

- # The observed failure percentage must exceed this value before the upgrade

- # aborts: with serial=4, two failed nodes are 50%, so this combination would abort

- # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49

- # whereas this one would not (a worked example follows the diff)

- # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50

- #

- # A timeout to wait for nodes to drain pods can be specified to ensure that the

- # upgrade continues even if nodes fail to drain pods in the allowed time. The

- # default value of 0 will wait indefinitely allowing the admin to investigate

- # the root cause and ensuring that disruption budgets are respected. If a

- # timeout of 0 is used there will also be one attempt to retry draining the

- # node. If a non-zero timeout is specified there will be no attempt to retry.

- #openshift_upgrade_nodes_drain_timeout=0

- #

- # Multiple data migrations take place and, if they fail, they will fail the

- # upgrade. You may wish to disable these or make them non-fatal.

- #

- # openshift_upgrade_pre_storage_migration_enabled=true

- # openshift_upgrade_pre_storage_migration_fatal=true

- # openshift_upgrade_post_storage_migration_enabled=true

- # openshift_upgrade_post_storage_migration_fatal=false

- 

- ######################################################################

- # CloudForms/ManageIQ (CFME/MIQ) Configuration

- 

- # See the readme for full descriptions and getting started

- # instructions: ../../roles/openshift_management/README.md or go directly to

- # their definitions: ../../roles/openshift_management/defaults/main.yml

- # ../../roles/openshift_management/vars/main.yml

- #

- # Namespace for the CFME project

- #openshift_management_project: openshift-management

- 

- # Namespace/project description

- #openshift_management_project_description: CloudForms Management Engine

- 

- # Choose 'miq-template' for a podified database install

- # Choose 'miq-template-ext-db' for an external database install

- #

- # If you are using the miq-template-ext-db template then you must add

- # the required database parameters to the

- # openshift_management_template_parameters variable.

- #openshift_management_app_template: miq-template

- 

- # Allowed options: nfs, nfs_external, preconfigured, cloudprovider.

- #openshift_management_storage_class: nfs

- 

- # [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a

- # netapp appliance, then you must set the hostname here. Leave the

- # value as 'false' if you are not using external NFS.

- #openshift_management_storage_nfs_external_hostname: false

- 

- # [OPTIONAL] - If you are using external NFS then you must set the base

- # path to the exports location here.

- #

- # Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports

- # that will back the application PV and optionally the database

- # PV. Export path definitions are relative to

- # { openshift_management_storage_nfs_base_dir }

- #

- # LOCAL NFS NOTE:

- #

- # You may also change this value if you want to change the default

- # path used for local NFS exports.

- #openshift_management_storage_nfs_base_dir: /exports

- 

- # LOCAL NFS NOTE:

- #

- # You may override the automatically selected LOCAL NFS server by

- # setting this variable. Useful for testing specific task files.

- #openshift_management_storage_nfs_local_hostname: false

- 

- # These are the default values for the username and password of the

- # management app. Changing these values in your inventory will not

- # change your username or password. You should only need to change

- # these values in your inventory if you already changed the actual

- # name and password AND are trying to use integration scripts.

- #

- # For example, adding this cluster as a container provider,

- # playbooks/openshift-management/add_container_provider.yml

- #openshift_management_username: admin

- #openshift_management_password: smartvm

- 

- # A hash of parameters you want to override or set in the

- # miq-template.yaml or miq-template-ext-db.yaml templates. Set this in

- # your inventory file as a simple hash. Acceptable values are defined

- # under the .parameters list in files/miq-template{-ext-db}.yaml

- # Example:

- #

- # openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}

- #openshift_management_template_parameters: {}

- 

- # Firewall configuration

- # You can open additional firewall ports by defining them as a list of service

- # names and ports/port ranges for either masters or nodes.

- #openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]

- #openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]

- 

- # Service port node range

- #openshift_node_port_range=30000-32767

- 

- # Enable unsupported configurations, things that will yield a partially

- # functioning cluster but would not be supported for production use

- #openshift_enable_unsupported_configurations=false

- openshift_enable_unsupported_configurations=True
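
For reference, the abort condition described in the removed upgrade-control comments works out as follows. This is a minimal sketch in inventory form; the percentages are my own arithmetic, not output from the playbooks:

# Hypothetical batch: 4 nodes upgraded at once, 2 of them fail,
# so the failure rate is 2/4 = 50%.
# The run aborts only when 50 > openshift_upgrade_nodes_max_fail_percentage:
openshift_upgrade_nodes_serial=4
# 50 > 49 -> the upgrade aborts:
openshift_upgrade_nodes_max_fail_percentage=49
# 50 > 50 is false -> the upgrade continues despite the failures:
#openshift_upgrade_nodes_max_fail_percentage=50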
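
Similarly, the no_http2 toggle near the end of the removed template can be read as a single guard; a condensed Jinja2 sketch of the same pattern (behavior unchanged, the nested ifs collapse into one test):

{% if no_http2 is defined and no_http2 %}
openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
openshift_node_env_vars={"ENABLE_HTTP2": "true"}
{% endif %}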

Related: https://pagure.io/fedora-infrastructure/issue/9693

Should we rename cluster-inventory-iad2-prod.j2 to cluster-inventory-prod.j2 (for consistency with cluster-inventory-stg.j2)?
In cluster-inventory-iad2-prod.j2, I'm not sure why we have a {% if env == "staging" %} block, since the task referring to it already has an env == 'production' condition.
Should we remove this part for clarity? (A sketch of the pattern follows below.)
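
To illustrate the question, a minimal sketch, not the actual file contents (the when: condition is abbreviated):

# Task in the role that renders the template:
#   when: env == 'production' and ...

# Inside cluster-inventory-iad2-prod.j2:
{% if env == "staging" %}
# ... staging-only settings; env is 'production' whenever this template
# is rendered, so this block can never be reached ...
{% endif %}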

Metadata Update from @smooge:
- Pull-request tagged with: post-freeze

3 years ago

rebased onto 299c9de227954b88d6dff35460fc5d1e684e3e08

3 years ago

rebased onto 35664e9

3 years ago

Pull-Request has been merged by kevin

3 years ago