Inventory file and installation
The most important step in this installation is to correctly prepare the Ansible inventory file.
My setup is somewhat complicated; here is my complete inventory file:
[root@dns01 ~]# cat /etc/ansible/hosts
# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups.
# The lb group lets Ansible configure HAProxy as the load balancing solution.
# Comment lb out if your load balancer is pre-configured.
[OSEv3:children]
masters
nodes
etcd
lb
glusterfs
glusterfs_registry
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
ansible_user=root
ansible_ssh_user=root
deployment_type=origin
openshift_release=v3.11
openshift_web_console_install=true
openshift_cockpit_deployer_image='docker.io/timbordemann/cockpit-kubernetes:latest'
openshift_master_default_subdomain=apps.okd.lcl
openshift_master_dynamic_provisioning_enabled=true
openshift_router_selector='node-role.kubernetes.io/infra=true'
openshift_registry_selector='node-role.kubernetes.io/infra=true'
openshift_logging_elasticsearch_memory_limit=8Gi
# logging
openshift_logging_install_logging=true
openshift_logging_es_pvc_dynamic=true
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_pvc_size=25Gi
openshift_logging_es_pvc_storage_class_name="glusterfs-registry"
# metrics
openshift_metrics_install_metrics=true
openshift_metrics_storage_kind=dynamic
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_storage_volume_size=25Gi
openshift_metrics_cassandra_pvc_storage_class_name="glusterfs-registry"
# glusterfs
openshift_storage_glusterfs_timeout=900
openshift_storage_glusterfs_namespace=glusterfs
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=true
# glusterfs_registry
openshift_storage_glusterfs_registry_namespace=glusterfs-registry
openshift_storage_glusterfs_registry_storageclass=true
openshift_storage_glusterfs_registry_storageclass_default=false
# glusterfs_registry_storage
openshift_hosted_registry_storage_kind=glusterfs
openshift_hosted_registry_storage_volume_size=25Gi
openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true"
openshift_storage_glusterfs_heketi_admin_key='heketi'
openshift_storage_glusterfs_heketi_user_key='PASSWORD'
openshift_storage_glusterfs_image='gluster/gluster-centos:latest'
openshift_storage_glusterfs_heketi_image='heketi/heketi:latest'
openshift_storage_glusterfs_block_image='gluster/glusterblock-provisioner:latest'
# uncomment the following to enable htpasswd authentication; defaults to AllowAllPasswordIdentityProvider
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# Native HA with an Internal & External LB VIPs
openshift_master_cluster_method=native
openshift_master_cluster_hostname=nlb.okd.lcl
openshift_master_cluster_public_hostname=xlb.okd.lcl
# host group for masters
[masters]
mst[01:03].okd.lcl
# host group for etcd
[etcd]
mst[01:03].okd.lcl
# host group for nodes, includes region info
[nodes]
mst[01:03].okd.lcl openshift_node_group_name='node-config-master'
wrk[01:03].okd.lcl openshift_node_group_name='node-config-compute'
inf[01:03].okd.lcl openshift_node_group_name='node-config-infra'
glf[01:03].okd.lcl openshift_node_group_name='node-config-compute'
glr[01:03].okd.lcl openshift_node_group_name='node-config-infra'
[glusterfs]
glf01.okd.lcl glusterfs_zone=1 glusterfs_hostname=glf01.okd.lcl glusterfs_ip=10.1.1.61 glusterfs_devices='["/dev/sdc"]'
glf02.okd.lcl glusterfs_zone=1 glusterfs_hostname=glf02.okd.lcl glusterfs_ip=10.1.1.62 glusterfs_devices='["/dev/sdc"]'
glf03.okd.lcl glusterfs_zone=1 glusterfs_hostname=glf03.okd.lcl glusterfs_ip=10.1.1.63 glusterfs_devices='["/dev/sdc"]'
[glusterfs_registry]
glr01.okd.lcl glusterfs_zone=1 glusterfs_hostname=glr01.okd.lcl glusterfs_ip=10.1.1.71 glusterfs_devices='["/dev/sdc"]'
glr02.okd.lcl glusterfs_zone=1 glusterfs_hostname=glr02.okd.lcl glusterfs_ip=10.1.1.72 glusterfs_devices='["/dev/sdc"]'
glr03.okd.lcl glusterfs_zone=1 glusterfs_hostname=glr03.okd.lcl glusterfs_ip=10.1.1.73 glusterfs_devices='["/dev/sdc"]'
[lb]
nlb.okd.lcl
[extras]
#xlb.okd.lcl
#dns01.okd.lcl
#nlb01.okd.lcl
#nlb02.okd.lcl
#xlb01.okd.lcl
#xlb02.okd.lcl
[root@dns01 ~]#
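Before kicking off the playbooks, it is worth confirming that Ansible can actually reach every host in the inventory. A minimal sanity check, assuming passwordless SSH as root is already configured to all hosts:
[root@dns01 ~]# ansible all -m ping
[root@dns01 ~]# ansible nodes --list-hosts
If any host fails the ping module, fix SSH access or name resolution for it before running the installer.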
[root@dns01 ~]# cd /root/openshift-ansible/
[root@dns01 openshift-ansible]# ansible-playbook playbooks/prerequisites.yml
[root@dns01 openshift-ansible]# ansible-playbook playbooks/deploy_cluster.yml
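Once deploy_cluster.yml finishes, a quick way to confirm the cluster is healthy is to check the nodes and pods. A minimal sketch, run from one of the masters (e.g. mst01) where the admin kubeconfig is assumed to be in place:
[root@mst01 ~]# oc get nodes
[root@mst01 ~]# oc get pods --all-namespaces
All nodes should report Ready, and the logging, metrics, glusterfs and glusterfs-registry pods should reach Running.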
You can find more information about installing, uninstalling, and retrying the installation in the main doc here.
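For reference, an uninstall playbook ships with the same openshift-ansible repository; the path below is an assumption based on the release-3.11 branch, so verify it in your checkout before running:
[root@dns01 openshift-ansible]# ansible-playbook playbooks/adhoc/uninstall.yml
This tears the cluster down against the same inventory, which is useful when a failed deploy needs a clean retry.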