Tong Li f455064066 Added cockroachdb cluster setup across multiple clouds
This patch does the following:
1. Added the apps role so that k8s app deployment can be placed in the role
2. Added cockroachdb-init-pod deployment in the post role since this has to
   be deployed first.
3. Added cockroachdb-pod deployment in the apps role
4. Allow this workload to stand up a standalone cockroachdb cluster or
   join an existing cockroachdb cluster
5. Added the cockroachdb load generator container so that once the cluster
   is started, there will be some load generated.
6. Added a way to pre-allocate floating IP addresses so that the workload
   can use pre-allocated floating IPs for VMs.

Change-Id: Ifa9eeb9d761d9801cab580445e6c43c8cf1dfdaa
2017-05-03 03:01:56 +00:00

144 lines
3.0 KiB
YAML
Executable File

---
- name: Get start timestamp
  hosts: cloud
  connection: local
  tasks:
    # Capture the wall-clock time at which this run began; the closing
    # "Inform the installer" play reads it back via hostvars to report
    # total elapsed time.
    - set_fact:
        starttime: "{{ ansible_date_time }}"
  tags: "info"
- name: Prepare to run the workload
  hosts: cloud
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  # Roles always execute before a play's tasks, so the prepare role runs
  # first and the action-specific task file (apply/destroy/...) follows.
  roles:
    - prepare
  tasks:
    - include: "roles/prepare/tasks/{{ action }}.yml"
  tags: "{{ action }}"
- name: provision servers
  hosts: prohosts
  connection: local
  # Let each provisioning host run to completion independently instead of
  # the default lock-step linear strategy.
  strategy: free
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - provision
  tasks:
    - include: "roles/provision/tasks/{{ action }}.yml"
  tags: "{{ action }}"
- name: Post provision process
  hosts: cloud
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  # postprovision role first, then its action-specific include.
  roles:
    - postprovision
  tasks:
    - include: "roles/postprovision/tasks/{{ action }}.yml"
  tags: "{{ action }}"
- name: Boot strap all the target nodes
  hosts: cmasters, cworkers
  # Facts are skipped here — presumably because the coreos-bootstrap role
  # must install Python on the nodes before modules can run; confirm
  # against the vmware.coreos-bootstrap role.
  # Canonical lowercase boolean (was "False") to match the rest of this
  # playbook and avoid YAML 1.1 truthy ambiguity.
  gather_facts: false
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  strategy: free
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - vmware.coreos-bootstrap
  tags: "apply"
- name: Install required packages for all nodes
  hosts: cworkers, cmasters, uworkers, umasters
  # Canonical lowercase boolean (was "False") for consistency with the
  # lowercase true/false used throughout this playbook.
  gather_facts: false
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  strategy: free
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - common
  # Package installs go through the configured proxy, if any.
  environment: "{{ proxy_env }}"
  tags: "common"
- name: Setup master
  hosts: cmasters, umasters
  # Masters need facts gathered; no free strategy here, so masters are
  # configured in lock step.
  gather_facts: true
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - master
  environment: "{{ proxy_env }}"
  tags: "master"
- name: Setup workers
  # The worker role also runs on master hosts, not just dedicated workers.
  hosts: cworkers, cmasters, uworkers, umasters
  gather_facts: true
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  strategy: free
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - worker
  environment: "{{ proxy_env }}"
  tags: "worker"
- name: Post configurations
  hosts: cmasters, umasters
  gather_facts: true
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  # post role first (roles precede tasks), then the action-specific
  # include — this is where cockroachdb-init-pod is deployed per the
  # commit message.
  roles:
    - post
  tasks:
    - include: "roles/post/tasks/{{ action }}.yml"
  environment: "{{ proxy_env }}"
  tags: "post"
- name: Start up applications
  hosts: cworkers, uworkers
  gather_facts: true
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  # apps role deploys the k8s application pods (cockroachdb-pod et al.).
  roles:
    - apps
  environment: "{{ proxy_env }}"
  tags: "apps"
- name: Inform the installer
  hosts: cloud
  connection: local
  tasks:
    # Print the dashboard URL for whichever master group exists; only one
    # of the two debug tasks fires when a single cloud type is in play.
    - debug:
        msg: >-
          Access kubernetes dashboard at
          http://{{ groups['umasters'][0] }}:30000
      when: groups['umasters'] is defined
    - debug:
        msg: >-
          Access kubernetes dashboard at
          http://{{ groups['cmasters'][0] }}:30000
      when: groups['cmasters'] is defined
    # Elapsed-time report using the timestamp saved by the first play.
    - debug:
        msg: >-
          The work load started at {{ hostvars.cloud.starttime.time }},
          ended at {{ ansible_date_time.time }}
  tags: "info"