Move both ansible and terraform existing workloads to this repository

Change-Id: Ie1091cb4c5bbefa7da8f3d095be33860a56432cc
parent 40a1cc422b
commit c2d9031536
4
workloads/ansible/shade/dockerswarm/.gitignore
vendored
Executable file
@@ -0,0 +1,4 @@
*.out
*/**/*.log
*/**/.DS_Store
*/**/._
3
workloads/ansible/shade/dockerswarm/ansible.cfg
Normal file
@@ -0,0 +1,3 @@
[defaults]
inventory = ./hosts
host_key_checking=False
1
workloads/ansible/shade/dockerswarm/hosts
Normal file
@@ -0,0 +1 @@
cloud ansible_host=127.0.0.1 ansible_python_interpreter=python
19
workloads/ansible/shade/dockerswarm/roles/post_apply/tasks/main.yml
Executable file
@@ -0,0 +1,19 @@
---
- debug:
    msg: >-
      export DOCKER_HOST=tcp://{{ hostvars.swarmnode1.swarmnode.openstack.public_v4 }}:2375;
      export DOCKER_TLS_VERIFY=1;
      export DOCKER_CERT_PATH=/tmp/{{ env }}/keys
  when: hostvars.swarmnode1.swarmnode.openstack.public_v4 != ""

- debug:
    msg: >-
      export DOCKER_HOST=tcp://{{ hostvars.swarmnode1.swarmnode.openstack.private_v4 }}:2375;
      export DOCKER_TLS_VERIFY=1;
      export DOCKER_CERT_PATH=/tmp/{{ env }}/keys
  when: hostvars.swarmnode1.swarmnode.openstack.public_v4 == ""

- debug:
    msg: >-
      The workload test started at {{ starttime.time }},
      ended at {{ ansible_date_time.time }}
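The debug messages above emit ready-to-paste shell exports. A typical follow-up on the operator's machine looks like this (a sketch; the address and env name are placeholders for the values printed by the play):

```
# values come from the debug message printed by post_apply
export DOCKER_HOST=tcp://<swarmnode1-ip>:2375
export DOCKER_TLS_VERIFY=1
export DOCKER_CERT_PATH=/tmp/<env>/keys

# the docker client now talks TLS to the swarm manager
docker info
```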
27
workloads/ansible/shade/dockerswarm/roles/post_destroy/tasks/main.yml
Executable file
@@ -0,0 +1,27 @@
---
- name: Remove security group
  os_security_group:
    state: absent
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: dockerswarm_sg
    description: security group for dockerswarm

- name: Delete discovery url directory
  file: path="/tmp/{{ env }}" state=absent

- name: Delete a key-pair
  os_keypair:
    state: absent
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: "dockerswarm"

- debug:
    msg: >-
      The workload test started at {{ starttime.time }},
      ended at {{ ansible_date_time.time }}
96
workloads/ansible/shade/dockerswarm/roles/prep_apply/tasks/main.yml
Executable file
@@ -0,0 +1,96 @@
---
- name: Get start timestamp
  set_fact: starttime="{{ ansible_date_time }}"

- name: Create certificate directory
  file: path="/tmp/{{ env }}/keys" state=directory

- stat: path="/tmp/{{ env }}/discovery_url"
  register: discovery_url_flag

- name: Get docker discovery url
  get_url:
    url: "https://discovery.etcd.io/new?size={{ app_env.swarm_size }}"
    dest: "/tmp/{{ env }}/discovery_url"
  when: discovery_url_flag.stat.exists == false

- shell: openssl genrsa -out "/tmp/{{ env }}/keys/ca-key.pem" 2048
- shell: openssl genrsa -out "/tmp/{{ env }}/keys/key.pem" 2048

- shell: >-
    openssl req -x509 -new -nodes -key /tmp/{{ env }}/keys/ca-key.pem
    -days 10000 -out /tmp/{{ env }}/keys/ca.pem -subj '/CN=docker-CA'

- shell: >-
    openssl req -new -key /tmp/{{ env }}/keys/key.pem
    -out /tmp/{{ env }}/keys/cert.csr
    -subj '/CN=docker-client' -config ./roles/prov_apply/templates/openssl.cnf

- shell: >-
    openssl x509 -req -in /tmp/{{ env }}/keys/cert.csr
    -CA /tmp/{{ env }}/keys/ca.pem -CAkey /tmp/{{ env }}/keys/ca-key.pem
    -CAcreateserial -out /tmp/{{ env }}/keys/cert.pem -days 365
    -extensions v3_req -extfile ./roles/prov_apply/templates/openssl.cnf

- name: Retrieve specified flavor
  os_flavor_facts:
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: "{{ app_env.flavor_name }}"

- name: Create a key-pair
  os_keypair:
    state: "present"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: "dockerswarm"
    public_key_file: "{{ app_env.public_key_file }}"

- name: Create security group
  os_security_group:
    state: present
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: dockerswarm_sg
    description: security group for dockerswarm

- name: Add security rules
  os_security_group_rule:
    state: present
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    security_group: dockerswarm_sg
    protocol: "{{ item.protocol }}"
    direction: "{{ item.dir }}"
    port_range_min: "{{ item.p_min }}"
    port_range_max: "{{ item.p_max }}"
    remote_ip_prefix: 0.0.0.0/0
  with_items:
    - { p_min: 22, p_max: 22, dir: ingress, protocol: tcp }
    - { p_min: 2375, p_max: 2376, dir: ingress, protocol: tcp }
    - { p_min: 2379, p_max: 2380, dir: ingress, protocol: tcp }
    - { p_min: 2379, p_max: 2380, dir: egress, protocol: tcp }
    - { p_min: -1, p_max: -1, dir: ingress, protocol: icmp }
    - { p_min: -1, p_max: -1, dir: egress, protocol: icmp }

- name: Create cloudinit file for all nodes
  template:
    src: templates/cloudinit.j2
    dest: "/tmp/{{ env }}/cloudinit"

- name: Add nodes to host group
  add_host:
    name: "swarmnode{{ item }}"
    hostname: "127.0.0.1"
    groups: dockerswarm
    host_no: "{{ item }}"
  with_sequence: count={{ app_env.swarm_size }}
  no_log: True
47
workloads/ansible/shade/dockerswarm/roles/prep_apply/templates/cloudinit.j2
Executable file
@@ -0,0 +1,47 @@
#cloud-config
coreos:
  units:
    - name: etcd.service
      mask: true
    - name: etcd2.service
      command: start
    - name: docker.service
      command: start
    - name: swarm-agent.service
      content: |
        [Unit]
        Description=swarm agent
        Requires=docker.service
        After=docker.service

        [Service]
        EnvironmentFile=/etc/environment
        TimeoutStartSec=20m
        ExecStartPre=/usr/bin/docker pull swarm:latest
        ExecStartPre=-/usr/bin/docker rm -f swarm-agent
        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-agent swarm:latest join --addr=$COREOS_PRIVATE_IPV4:2376 etcd://$COREOS_PRIVATE_IPV4:2379/docker"
        ExecStop=/usr/bin/docker stop swarm-agent
    - name: swarm-manager.service
      content: |
        [Unit]
        Description=swarm manager
        Requires=docker.service
        After=docker.service

        [Service]
        EnvironmentFile=/etc/environment
        TimeoutStartSec=20m
        ExecStartPre=/usr/bin/docker pull swarm:latest
        ExecStartPre=-/usr/bin/docker rm -f swarm-manager
        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-manager -v /etc/docker/ssl:/etc/docker/ssl --net=host swarm:latest manage --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem etcd://$COREOS_PRIVATE_IPV4:2379/docker"
        ExecStop=/usr/bin/docker stop swarm-manager
  etcd2:
    discovery: {{ lookup('file', '/tmp/'+env+'/discovery_url') }}
    advertise-client-urls: http://$private_ipv4:2379
    initial-advertise-peer-urls: http://$private_ipv4:2380
    listen-client-urls: http://0.0.0.0:2379
    listen-peer-urls: http://$private_ipv4:2380
    data-dir: /var/lib/etcd2
    initial-cluster-token: openstackinterop
  update:
    reboot-strategy: "off"
13
workloads/ansible/shade/dockerswarm/roles/prep_destroy/tasks/main.yml
Executable file
@@ -0,0 +1,13 @@
---
- name: Get start timestamp
  set_fact: starttime="{{ ansible_date_time }}"

- name: Add swarm nodes to the dockerswarm host group
  add_host:
    name: "swarmnode{{ item }}"
    hostname: "127.0.0.1"
    groups: dockerswarm
    host_no: "{{ item }}"
  with_sequence: count={{ app_env.swarm_size }}
  no_log: True
39
workloads/ansible/shade/dockerswarm/roles/prov_apply/tasks/main.yml
Executable file
@@ -0,0 +1,39 @@
---
- name: Get public IP
  set_fact: node_ip="{{ swarmnode.openstack.public_v4 }}"
  when: swarmnode.openstack.public_v4 != ""

- name: Get private IP
  set_fact: node_ip="{{ swarmnode.openstack.private_v4 }}"
  when: swarmnode.openstack.public_v4 == ""

- name: Make certificate configuration file
  copy:
    src: templates/openssl.cnf
    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/"

- name: Make service file
  template:
    src: templates/dockerservice.j2
    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/dockerservice.cnf"

- name: Create bootstrap file
  template:
    src: templates/bootstrap1.j2
    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/bootstrap.sh"
  when: swarmnode.openstack.private_v4 == ""

- name: Create bootstrap file
  template:
    src: templates/bootstrap2.j2
    dest: "/tmp/{{ env }}/{{ node_ip }}/keys/bootstrap.sh"
  when: swarmnode.openstack.private_v4 != ""

- name: Transfer configuration
  shell: scp -r "/tmp/{{ env }}/{{ node_ip }}/keys" "core@{{ node_ip }}:/home/core"

- name: Transfer certificate file over to the nodes
  shell: scp -r "/tmp/{{ env }}/keys" "core@{{ node_ip }}:/home/core"

- name: Start services
  shell: ssh "core@{{ node_ip }}" "sh keys/bootstrap.sh"
31
workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/bootstrap1.j2
Executable file
@@ -0,0 +1,31 @@
mkdir -p /home/core/.docker
cp /home/core/keys/ca.pem /home/core/.docker/
cp /home/core/keys/cert.pem /home/core/.docker/
cp /home/core/keys/key.pem /home/core/.docker/

echo 'subjectAltName = @alt_names' >> /home/core/keys/openssl.cnf
echo '[alt_names]' >> /home/core/keys/openssl.cnf

cd /home/core/keys

echo 'IP.1 = {{ swarmnode.openstack.public_v4 }}' >> openssl.cnf
echo 'DNS.1 = {{ app_env.fqdn }}' >> openssl.cnf
echo 'DNS.2 = {{ swarmnode.openstack.public_v4 }}.xip.io' >> openssl.cnf

openssl req -new -key key.pem -out cert.csr -subj '/CN=docker-client' -config openssl.cnf
openssl x509 -req -in cert.csr -CA ca.pem -CAkey ca-key.pem \
  -CAcreateserial -out cert.pem -days 365 -extensions v3_req -extfile openssl.cnf

sudo mkdir -p /etc/docker/ssl
sudo cp ca.pem /etc/docker/ssl/
sudo cp cert.pem /etc/docker/ssl/
sudo cp key.pem /etc/docker/ssl/

# Apply localized settings to services
sudo mkdir -p /etc/systemd/system/{docker,swarm-agent,swarm-manager}.service.d

sudo mv /home/core/keys/dockerservice.cnf /etc/systemd/system/docker.service.d/10-docker-service.conf
sudo systemctl daemon-reload
sudo systemctl restart docker.service
sudo systemctl start swarm-agent.service
sudo systemctl start swarm-manager.service
32
workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/bootstrap2.j2
Executable file
@@ -0,0 +1,32 @@
mkdir -p /home/core/.docker
cp /home/core/keys/ca.pem /home/core/.docker/
cp /home/core/keys/cert.pem /home/core/.docker/
cp /home/core/keys/key.pem /home/core/.docker/

echo 'subjectAltName = @alt_names' >> /home/core/keys/openssl.cnf
echo '[alt_names]' >> /home/core/keys/openssl.cnf

cd /home/core/keys

echo 'IP.1 = {{ swarmnode.openstack.private_v4 }}' >> openssl.cnf
echo 'IP.2 = {{ swarmnode.openstack.public_v4 }}' >> openssl.cnf
echo 'DNS.1 = {{ app_env.fqdn }}' >> openssl.cnf
echo 'DNS.2 = {{ swarmnode.openstack.public_v4 }}.xip.io' >> openssl.cnf

openssl req -new -key key.pem -out cert.csr -subj '/CN=docker-client' -config openssl.cnf
openssl x509 -req -in cert.csr -CA ca.pem -CAkey ca-key.pem \
  -CAcreateserial -out cert.pem -days 365 -extensions v3_req -extfile openssl.cnf

sudo mkdir -p /etc/docker/ssl
sudo cp ca.pem /etc/docker/ssl/
sudo cp cert.pem /etc/docker/ssl/
sudo cp key.pem /etc/docker/ssl/

# Apply localized settings to services
sudo mkdir -p /etc/systemd/system/{docker,swarm-agent,swarm-manager}.service.d

sudo mv /home/core/keys/dockerservice.cnf /etc/systemd/system/docker.service.d/10-docker-service.conf
sudo systemctl daemon-reload
sudo systemctl restart docker.service
sudo systemctl start swarm-agent.service
sudo systemctl start swarm-manager.service
2
workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/dockerservice.j2
Executable file
@@ -0,0 +1,2 @@
[Service]
Environment="DOCKER_OPTS=-H=0.0.0.0:2376 -H unix:///var/run/docker.sock --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem --cluster-advertise {{app_env.net_device}}:2376 --cluster-store etcd://127.0.0.1:2379/docker"
8
workloads/ansible/shade/dockerswarm/roles/prov_apply/templates/openssl.cnf
Executable file
@@ -0,0 +1,8 @@
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
14
workloads/ansible/shade/dockerswarm/roles/prov_destroy/tasks/main.yml
Executable file
@@ -0,0 +1,14 @@
---
- name: Remove docker swarm nodes
  os_server:
    state: "absent"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: docker-swarm-{{ host_no }}
    key_name: "dockerswarm"
    timeout: 200
    security_groups: dockerswarm_sg
    meta:
      hostname: docker-swarm-{{ host_no }}
21
workloads/ansible/shade/dockerswarm/roles/vm_apply/tasks/main.yml
Executable file
@@ -0,0 +1,21 @@
---
- name: Create docker swarm nodes
  os_server:
    state: "present"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: docker-swarm-{{ host_no }}
    image: "{{ app_env.image_name }}"
    key_name: "dockerswarm"
    timeout: 200
    flavor: "{{ hostvars.cloud.openstack_flavors[0].id }}"
    network: "{{ app_env.private_net_name }}"
    auto_ip: yes
    userdata: "{{ lookup('file', '/tmp/' + env + '/cloudinit') }}"
    security_groups: dockerswarm_sg
    meta:
      hostname: docker-swarm-{{ host_no }}
  register: swarmnode
1
workloads/ansible/shade/dockerswarm/roles/vm_destroy/tasks/main.yml
Executable file
@@ -0,0 +1 @@
---
33
workloads/ansible/shade/dockerswarm/site.yml
Executable file
@@ -0,0 +1,33 @@
---
- name: prepare for provision
  hosts: cloud
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - "prep_{{ action }}"

- name: provision swarm nodes
  hosts: dockerswarm
  serial: 1
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - "vm_{{ action }}"

- name: setup swarm nodes
  hosts: dockerswarm
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - "prov_{{ action }}"

- name: post provisioning
  hosts: cloud
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - "post_{{ action }}"
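The plays above select their roles at runtime from the `action` and `env` extra vars, so an invocation might look like this (a sketch based on the variables referenced in this playbook and the vars files below; the values are placeholders):

```
# "env" picks vars/<env>.yml; "action" picks the prep_/vm_/prov_/post_ roles
ansible-playbook -e "action=apply env=leap password=<cloud-password>" site.yml

# the same playbook tears everything down again
ansible-playbook -e "action=destroy env=leap password=<cloud-password>" site.yml
```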
23
workloads/ansible/shade/dockerswarm/vars/bluebox.yml
Executable file
@@ -0,0 +1,23 @@
---
horizon_url: "https://salesdemo-sjc.openstack.blueboxgrid.com"

auth: {
  auth_url: "https://salesdemo-sjc.openstack.blueboxgrid.com:5000/v2.0",
  username: "litong01",
  password: "{{ password }}",
  project_name: "Interop"
}

app_env: {
  image_name: "coreos",
  private_net_name: "interopnet",
  net_device: "eth0",
  flavor_name: "m1.small",
  swarm_version: "latest",
  swarm_size: 3,
  region_name: "",
  availability_zone: "",
  validate_certs: True,
  fqdn: "swarm.example.com",
  public_key_file: "/home/tong/.ssh/id_rsa.pub"
}
21
workloads/ansible/shade/dockerswarm/vars/dreamhost.yml
Executable file
@@ -0,0 +1,21 @@
---
horizon_url: "https://iad2.dreamcompute.com"

auth: {
  auth_url: "https://iad2.dream.io:5000/v2.0",
  username: "stemaf4",
  password: "{{ password }}",
  project_name: "dhc2131831"
}

app_env: {
  region_name: "RegionOne",
  image_name: "CoreOS Sept16",
  private_net_name: "",
  flavor_name: "gp1.subsonic",
  public_key_file: "/home/reed/.ssh/id_rsa.pub",
  swarm_version: "latest",
  swarm_size: 3,
  fqdn: "swarm.example.com",
  net_device: "eth0"
}
24
workloads/ansible/shade/dockerswarm/vars/leap.yml
Executable file
@@ -0,0 +1,24 @@
---
horizon_url: "http://9.30.217.9"

auth: {
  auth_url: "http://9.30.217.9:5000/v3",
  username: "demo",
  password: "{{ password }}",
  domain_name: "default",
  project_name: "demo"
}

app_env: {
  image_name: "coreos",
  private_net_name: "Bluebox",
  net_device: "eth0",
  flavor_name: "m1.small",
  swarm_version: "latest",
  swarm_size: 3,
  region_name: "RegionOne",
  availability_zone: "nova",
  validate_certs: False,
  fqdn: "swarm.example.com",
  public_key_file: "/home/tong/.ssh/id_rsa.pub"
}
24
workloads/ansible/shade/dockerswarm/vars/osic.yml
Executable file
@@ -0,0 +1,24 @@
---
horizon_url: "https://cloud1.osic.org"

auth: {
  auth_url: "https://cloud1.osic.org:5000/v3",
  username: "litong01",
  password: "{{ password }}",
  domain_name: "default",
  project_name: "interop_challenge"
}

app_env: {
  image_name: "coreos",
  private_net_name: "interopnet",
  net_device: "eth0",
  flavor_name: "m1.small",
  swarm_version: "latest",
  swarm_size: 3,
  region_name: "",
  availability_zone: "",
  validate_certs: True,
  fqdn: "swarm.example.com",
  public_key_file: "/home/tong/.ssh/id_rsa.pub"
}
23
workloads/ansible/shade/dockerswarm/vars/ovh.yml
Executable file
@@ -0,0 +1,23 @@
---
horizon_url: "https://horizon.cloud.ovh.net"

auth: {
  auth_url: "https://auth.cloud.ovh.net/v2.0",
  username: "SXYbmFhC4aqQ",
  password: "{{ password }}",
  project_name: "2487610196015734"
}

app_env: {
  image_name: "coreos",
  private_net_name: "",
  net_device: "eth0",
  flavor_name: "eg-15-ssd",
  swarm_version: "latest",
  swarm_size: 3,
  region_name: "BHS1",
  availability_zone: "",
  validate_certs: True,
  fqdn: "swarm.example.com",
  public_key_file: "/home/tong/.ssh/id_rsa.pub"
}
6
workloads/ansible/shade/lampstack/.gitignore
vendored
Executable file
@@ -0,0 +1,6 @@
*.out
vars/*
*/**/*.log
*/**/.DS_Store
*/**/._
*/**/*.tfstate*
3
workloads/ansible/shade/lampstack/ansible.cfg
Normal file
@@ -0,0 +1,3 @@
[defaults]
inventory = ./hosts
host_key_checking = False
7
workloads/ansible/shade/lampstack/group_vars/all.yml
Executable file
@@ -0,0 +1,7 @@
---
db_user: "wpdbuser"
db_pass: "{{ lookup('password',
             '/tmp/sqlpassword chars=ascii_letters,digits length=8') }}"

proxy_env: {
}
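The `password` lookup generates the credential on first use and caches it in the file named by its first argument, so every play in a run sees the same value. On the control node it can be inspected directly (assuming the default lookup behavior):

```
# the lookup persists the generated 8-character password here
cat /tmp/sqlpassword
```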
1
workloads/ansible/shade/lampstack/hosts
Normal file
@@ -0,0 +1 @@
cloud ansible_host=127.0.0.1 ansible_python_interpreter=python
193
workloads/ansible/shade/lampstack/roles/apply/tasks/main.yml
Executable file
@@ -0,0 +1,193 @@
---
- name: Get start timestamp
  set_fact: starttime="{{ ansible_date_time }}"

- name: Retrieve specified flavor
  os_flavor_facts:
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: "{{ app_env.flavor_name }}"

- name: Create a key-pair
  os_keypair:
    state: "present"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: "lampstack"
    public_key_file: "{{ app_env.public_key_file }}"

- name: Create volume
  os_volume:
    state: present
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    size: "{{ app_env.volume_size }}"
    wait: yes
    display_name: db_volume

- name: Create security group
  os_security_group:
    state: present
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: lampstack_sg
    description: security group for lampstack

- name: Add security rules
  os_security_group_rule:
    state: present
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    security_group: lampstack_sg
    protocol: "{{ item.protocol }}"
    direction: "{{ item.dir }}"
    port_range_min: "{{ item.p_min }}"
    port_range_max: "{{ item.p_max }}"
    remote_ip_prefix: 0.0.0.0/0
  with_items:
    - { p_min: 22, p_max: 22, dir: ingress, protocol: tcp }
    - { p_min: 80, p_max: 80, dir: ingress, protocol: tcp }
    - { p_min: 2049, p_max: 2049, dir: ingress, protocol: tcp }
    - { p_min: 2049, p_max: 2049, dir: egress, protocol: tcp }
    - { p_min: 3306, p_max: 3306, dir: ingress, protocol: tcp }
    - { p_min: -1, p_max: -1, dir: ingress, protocol: icmp }
    - { p_min: -1, p_max: -1, dir: egress, protocol: icmp }

- name: Create database node
  os_server:
    state: "present"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: database
    image: "{{ app_env.image_name }}"
    key_name: "lampstack"
    timeout: 200
    flavor: "{{ app_env.flavor_name }}"
    network: "{{ app_env.private_net_name }}"
    userdata: "{{ lookup('file', 'templates/userdata.j2') }}"
    config_drive: "{{ app_env.config_drive | default('no') }}"
    security_groups: lampstack_sg
    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
    meta:
      hostname: database
  register: database

- name: Add database node to the dbservers host group
  add_host:
    name: "{{ database.openstack.public_v4 }}"
    groups: dbservers
  when: database.openstack.public_v4 != ""

- name: Add database node to the dbservers host group
  add_host:
    name: "{{ database.openstack.private_v4 }}"
    groups: dbservers
  when: database.openstack.public_v4 == ""

- name: Create balancer node
  os_server:
    state: "present"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: balancer
    image: "{{ app_env.image_name }}"
    key_name: "lampstack"
    timeout: 200
    flavor: "{{ app_env.flavor_name }}"
    network: "{{ app_env.private_net_name }}"
    userdata: "{{ lookup('file', 'templates/userdata.j2') }}"
    config_drive: "{{ app_env.config_drive | default('no') }}"
    security_groups: lampstack_sg
    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
    meta:
      hostname: balancer
  register: balancer

- name: Add balancer node to the balancers host group
  add_host:
    name: "{{ balancer.openstack.public_v4 }}"
    groups: balancers
  when: balancer.openstack.public_v4 != ""

- name: Add balancer node to the balancers host group
  add_host:
    name: "{{ balancer.openstack.private_v4 }}"
    groups: balancers
  when: balancer.openstack.public_v4 == ""

- name: Create a volume for database to save data
  os_server_volume:
    state: present
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    server: database
    volume: db_volume
    device: "{{ app_env.block_device_name }}"

- name: Create web server nodes to host application
  os_server:
    state: "present"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: apache-{{ item }}
    image: "{{ app_env.image_name }}"
    key_name: "lampstack"
    timeout: 200
    flavor: "{{ app_env.flavor_name }}"
    network: "{{ app_env.private_net_name }}"
    floating_ip_pools: "{{ app_env.public_net_name | default(omit) }}"
    userdata: "{{ lookup('file', 'templates/userdata.j2') }}"
    config_drive: "{{ app_env.config_drive | default('no') }}"
    security_groups: lampstack_sg
    meta:
      hostname: apache-{{ item }}
  with_sequence: count={{ app_env.stack_size - 2 }}
  register: webserver

- name: Add web servers to webservers host group
  add_host:
    name: "{{ item.openstack.public_v4 }}"
    groups: webservers
  when: item.openstack.public_v4 != ""
  with_items: "{{ webserver.results }}"
  no_log: True

- name: Add web servers to webservers host group
  add_host:
    name: "{{ item.openstack.private_v4 }}"
    groups: webservers
  when: item.openstack.public_v4 == ""
  with_items: "{{ webserver.results }}"
  no_log: True

- name: Add one web server to the wps host group
  add_host:
    name: "{{ webserver.results[0].openstack.public_v4 }}"
    groups: wps
  when: webserver.results[0].openstack.public_v4 != ""
  no_log: True

- name: Add one web server to the wps host group
  add_host:
    name: "{{ webserver.results[0].openstack.private_v4 }}"
    groups: wps
  when: webserver.results[0].openstack.public_v4 == ""
  no_log: True
4
workloads/ansible/shade/lampstack/roles/apply/templates/userdata.j2
Executable file
@@ -0,0 +1,4 @@
#cloud-config
runcmd:
  - addr=$(ip -4 -o addr | grep -v '127.0.0.1' | awk 'NR==1{print $4}' | cut -d '/' -f 1)
  - echo $addr `hostname` >> /etc/hosts
53
workloads/ansible/shade/lampstack/roles/balancer/tasks/main.yml
Executable file
@@ -0,0 +1,53 @@
---
- name: Haproxy install
  package:
    name="{{ item }}"
    state=latest
    update_cache=yes
  with_items:
    - haproxy
  when: ansible_distribution == 'Ubuntu'

- name: Haproxy install
  package:
    name="{{ item }}"
    state=latest
  with_items:
    - haproxy
  when: ansible_distribution == 'Fedora'

- name: Enable haproxy service
  replace:
    dest: /etc/default/haproxy
    regexp: "ENABLED=0"
    replace: "ENABLED=1"
    backup: no
  when: ansible_distribution == 'Ubuntu'

- name: Place the haproxy configuration file
  copy:
    src: templates/haproxy.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
    owner: root
    group: root
  when: ansible_distribution == 'Ubuntu'

- name: Place the haproxy configuration file
  copy:
    src: templates/haproxy_fedora.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
    owner: root
    group: root
  when: ansible_distribution == 'Fedora'

- name: Add web servers to the haproxy
  lineinfile:
    dest: /etc/haproxy/haproxy.cfg
    line: " server ws{{ item[0].openstack[item[1]] }} {{ item[0].openstack[item[1]] }}:80 check"
  with_nested:
    - "{{ hostvars.cloud.webserver.results }}"
    - ["private_v4", "public_v4"]
  when: item[0].openstack[item[1]] != ''
  no_log: True

- service: name=haproxy state=restarted enabled=yes
33
workloads/ansible/shade/lampstack/roles/balancer/templates/haproxy.cfg.j2
Executable file
@@ -0,0 +1,33 @@
global
  log /dev/log local0
  log /dev/log local1 notice
  chroot /var/lib/haproxy
  user haproxy
  group haproxy
  daemon

defaults
  log global
  mode http
  option httplog
  option dontlognull
  option redispatch
  retries 3
  contimeout 5000
  clitimeout 50000
  srvtimeout 50000
  errorfile 400 /etc/haproxy/errors/400.http
  errorfile 403 /etc/haproxy/errors/403.http
  errorfile 408 /etc/haproxy/errors/408.http
  errorfile 500 /etc/haproxy/errors/500.http
  errorfile 502 /etc/haproxy/errors/502.http
  errorfile 503 /etc/haproxy/errors/503.http
  errorfile 504 /etc/haproxy/errors/504.http

listen webfarm 0.0.0.0:80
  mode http
  stats enable
  stats uri /haproxy?stats
  balance roundrobin
  option httpclose
  option forwardfor
34
workloads/ansible/shade/lampstack/roles/balancer/templates/haproxy_fedora.cfg.j2
Executable file
@@ -0,0 +1,34 @@
global
  log /dev/log local0
  log /dev/log local1 notice
  chroot /var/lib/haproxy
  user haproxy
  group haproxy
  daemon

defaults
  log global
  mode http
  option httplog
  option dontlognull
  option redispatch
  retries 3
  contimeout 5000
  clitimeout 50000
  srvtimeout 50000
  errorfile 400 /usr/share/haproxy/400.http
  errorfile 403 /usr/share/haproxy/403.http
  errorfile 408 /usr/share/haproxy/408.http
  errorfile 500 /usr/share/haproxy/500.http
  errorfile 502 /usr/share/haproxy/502.http
  errorfile 503 /usr/share/haproxy/503.http
  errorfile 504 /usr/share/haproxy/504.http

listen webfarm
  bind 0.0.0.0:80
  mode http
  stats enable
  stats uri /haproxy?stats
  balance roundrobin
  option httpclose
  option forwardfor
23
workloads/ansible/shade/lampstack/roles/cleaner/tasks/apply.yml
Executable file
@@ -0,0 +1,23 @@
---
- os_floating_ip:
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    state: absent
    floating_ip_address: "{{ database.openstack.public_v4 }}"
    server: "{{ database.openstack.name }}"
  when: database.openstack.private_v4 != ""
  no_log: True

- os_floating_ip:
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    state: absent
    floating_ip_address: "{{ item.openstack.public_v4 }}"
    server: "{{ item.openstack.name }}"
  with_items: "{{ webserver.results }}"
  when: item.openstack.private_v4 != ""
  no_log: True
1
workloads/ansible/shade/lampstack/roles/cleaner/tasks/destroy.yml
Executable file
@@ -0,0 +1 @@
---
19
workloads/ansible/shade/lampstack/roles/common/tasks/main.yml
Executable file
@@ -0,0 +1,19 @@
---
- name: Wait until server is up and running
  local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
  become: no

- name: Check if running on Fedora
  raw: "[ -f /etc/fedora-release ]"
  register: fedora_release
  ignore_errors: yes

- name: Install python2 for Ansible
  raw: dnf install -y python2 python2-dnf libselinux-python
  register: result
  until: result|success
  when: fedora_release.rc == 0

- name: Set SELinux to permissive
  selinux: policy=targeted state=permissive
  when: fedora_release.rc == 0
164
workloads/ansible/shade/lampstack/roles/database/tasks/main.yml
Executable file
@@ -0,0 +1,164 @@
---
- stat: path=/tmp/diskflag
  register: diskflag

- name: update apt cache
  apt: update_cache=yes
  when: ansible_os_family == "Debian"

- name: install scsitools
  package: name=scsitools state=latest
  when: ansible_distribution == 'Ubuntu'

- name: install sg3_utils
  package: name=sg3_utils state=latest
  when: ansible_distribution == 'Fedora'

- shell: /sbin/rescan-scsi-bus
  when: diskflag.stat.exists == false and ansible_distribution == 'Ubuntu'

- shell: /bin/rescan-scsi-bus.sh
  when: diskflag.stat.exists == false and ansible_distribution == 'Fedora'

- shell: parted -s "{{ app_env.block_device_name }}" mklabel msdos
  when: diskflag.stat.exists == false

- shell: parted -s "{{ app_env.block_device_name }}" mkpart primary ext4 1049kb 100%
  when: diskflag.stat.exists == false

- lineinfile: dest=/tmp/diskflag line="disk is now partitioned!" create=yes

- filesystem: fstype=ext4 dev="{{ app_env.block_device_name }}1"
- mount: name=/storage src="{{ app_env.block_device_name }}1" fstype=ext4 state=mounted

- shell: ip -4 -o addr | grep -v '127.0.0.1' | awk 'NR==1{print $4}' | cut -d '/' -f 1
  register: local_ip

- name: Create share directory for database
  file: path=/storage/sqldatabase state=directory

- name: Create share directory for wpcontent
  file: path=/storage/wpcontent state=directory

- name: Create directory for database mounting point
  file: path=/var/lib/mysql state=directory

- name: Install NFS server
  package:
    name=nfs-kernel-server
    state=latest
    update_cache=yes
  when: ansible_distribution == 'Ubuntu'

- name: Install NFS server
  package: name=nfs-utils state=latest
  when: ansible_distribution == 'Fedora'

- name: Setup NFS database access
  lineinfile:
    dest: /etc/exports
    line: "/storage/sqldatabase {{ local_ip.stdout }}/32(rw,sync,no_root_squash,no_subtree_check)"

- name: Setup NFS webserver access
  lineinfile:
    dest: /etc/exports
    line: "/storage/wpcontent {{ item[0].openstack[item[1]] }}/32(rw,sync,no_root_squash,no_subtree_check)"
  with_nested:
    - "{{ hostvars.cloud.webserver.results }}"
    - ["private_v4", "public_v4"]
  when: item[0].openstack[item[1]] != ''
  no_log: True

- name: nfs export
  shell: exportfs -a

- service: name=nfs-kernel-server state=restarted enabled=yes
  when: ansible_distribution == 'Ubuntu'

- service: name=nfs-server state=restarted enabled=yes
  when: ansible_distribution == 'Fedora'

- name: Mount the database data directory
  mount:
    name: /var/lib/mysql
    src: "{{ local_ip.stdout }}:/storage/sqldatabase"
    state: mounted
    fstype: nfs

- name: Install mysql and libraries
  package:
    name="{{ item }}"
    state=latest
    update_cache=yes
  with_items:
    - mysql-server
    - python-mysqldb
  when: ansible_distribution == 'Ubuntu'

- name: Install mysql and libraries
  package:
    name="{{ item }}"
    state=latest
  with_items:
    - mariadb-server
    - python2-mysql
  when: ansible_distribution == 'Fedora'

- service: name=mysql state=stopped enabled=yes
  when: ansible_distribution == 'Ubuntu'

- service: name=mariadb state=stopped enabled=yes
  when: ansible_distribution == 'Fedora'

- stat: path=/etc/mysql/my.cnf
  register: mysqlflag

- name: Configure mysql 5.5
  replace:
    dest: "/etc/mysql/my.cnf"
    regexp: '^bind-address[ \t]*=[ ]*127\.0\.0\.1'
    replace: "bind-address = {{ local_ip.stdout }}"
    backup: no
  when: mysqlflag.stat.exists == true

- stat: path=/etc/mysql/mysql.conf.d/mysqld.cnf
  register: mysqlflag

- name: Configure mysql 5.6+
  replace:
    dest: "/etc/mysql/mysql.conf.d/mysqld.cnf"
    regexp: '^bind-address[ \t]*=[ ]*127\.0\.0\.1'
    replace: "bind-address = {{ local_ip.stdout }}"
    backup: no
  when: mysqlflag.stat.exists == true

- stat: path=/etc/my.cnf
  register: mariadbflag

- name: Configure MariaDB 10.1
  ini_file:
    dest=/etc/my.cnf
    section=mysqld
    option=bind-address
    value={{ local_ip.stdout }}
  when: mariadbflag.stat.exists == true

- service: name=mysql state=started enabled=yes
  when: ansible_distribution == 'Ubuntu'

- service: name=mariadb state=started enabled=yes
  when: ansible_distribution == 'Fedora'

- name: create wordpress database
  mysql_db:
    name: "decision2016"
    state: "{{ item }}"
  with_items:
    - ['present', 'absent', 'present']

- name: Add a user
  mysql_user:
    name: "{{ db_user }}"
    password: "{{ db_pass }}"
    host: "%"
    priv: 'decision2016.*:ALL'
    state: present
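Once this role completes, the database listens on the bind address set above and grants the generated user access from any host. A quick sanity check from the Ansible control node might look like this (hypothetical; it assumes a mysql client is installed and the database node is reachable):

```
# the password was generated by the lookup into /tmp/sqlpassword
mysql -h <database-ip> -u wpdbuser -p"$(cat /tmp/sqlpassword)" \
      decision2016 -e 'SHOW TABLES;'
```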
79
workloads/ansible/shade/lampstack/roles/destroy/tasks/main.yml
Executable file
@@ -0,0 +1,79 @@
---
- name: Get start timestamp
  set_fact: starttime="{{ ansible_date_time }}"

- name: Delete key pairs
  os_keypair:
    state: "absent"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: "lampstack"
    public_key_file: "{{ app_env.public_key_file }}"

- name: Delete database node
  os_server:
    state: "absent"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: database
    image: "{{ app_env.image_name }}"
    key_name: "lampstack"
    timeout: 200
    network: "{{ app_env.private_net_name }}"
    meta:
      hostname: database

- name: Delete balancer node
  os_server:
    state: "absent"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: balancer
    image: "{{ app_env.image_name }}"
    key_name: "lampstack"
    timeout: 200
    network: "{{ app_env.private_net_name }}"
    meta:
      hostname: balancer

- name: Delete web server nodes
  os_server:
    state: "absent"
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: apache-{{ item }}
    image: "{{ app_env.image_name }}"
    key_name: "lampstack"
    timeout: 200
    network: "{{ app_env.private_net_name }}"
    meta:
      hostname: apache-{{ item }}
  with_sequence: count={{ app_env.stack_size - 2 }}

- name: Delete security group
  os_security_group:
    state: absent
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    name: lampstack_sg
    description: security group for lampstack

- name: Delete cinder volume
  os_volume:
    state: absent
    auth: "{{ auth }}"
    region_name: "{{ app_env.region_name }}"
    availability_zone: "{{ app_env.availability_zone }}"
    validate_certs: "{{ app_env.validate_certs }}"
    wait: yes
    display_name: db_volume
147
workloads/ansible/shade/lampstack/roles/webserver/tasks/main.yml
Executable file
@@ -0,0 +1,147 @@
---
- name: Apache and php 5
  package:
    name="{{ item }}"
    state=latest
    update_cache=yes
  with_items:
    - apache2
    - php5
    - php5-mysql
    - nfs-common
    - unzip
    - ssmtp
  when: ansible_distribution == 'Ubuntu'

- name: Apache and php 5
  package:
    name="{{ item }}"
    state=latest
  with_items:
    - httpd
    - php
    - php-mysqlnd
    - nfs-utils
    - unzip
    - ssmtp
  when: ansible_distribution == 'Fedora'

- shell: rm -rf /var/www/html/index.html
  args:
    warn: no

- name: Create share directory for wpcontent
  file:
    path: /var/www/html/wp-content/uploads
    state: directory
    owner: www-data
    group: www-data
  when: ansible_distribution == 'Ubuntu'

- name: Create share directory for wpcontent
  file:
    path: /var/www/html/wp-content/uploads
    state: directory
    owner: apache
    group: apache
  when: ansible_distribution == 'Fedora'

- name: Mount the directory using private IP
  mount:
    name: /var/www/html/wp-content/uploads
    src: "{{ hostvars.cloud.database.openstack.private_v4 }}:/storage/wpcontent"
    state: mounted
    fstype: nfs
  when: hostvars.cloud.database.openstack.private_v4 != ""

- name: Mount the directory using public IP
  mount:
    name: /var/www/html/wp-content/uploads
    src: "{{ hostvars.cloud.database.openstack.public_v4 }}:/storage/wpcontent"
    state: mounted
    fstype: nfs
  when: hostvars.cloud.database.openstack.private_v4 == ""

- lineinfile: dest=/etc/apache2/apache2.conf line="ServerName localhost"
  when: ansible_distribution == 'Ubuntu'

- lineinfile: dest=/etc/httpd/conf/httpd.conf line="ServerName localhost"
  when: ansible_distribution == 'Fedora'

- name: Download wordpress
  get_url:
    url: "{{ app_env.wp_latest | default('https://wordpress.org/latest.tar.gz') }}"
    dest: /var/www/latest.tar.gz

- name: Unpack latest wordpress
  shell: tar -xf /var/www/latest.tar.gz -C /var/www/html --strip-components=1
  args:
    warn: no

- name: Create wordpress configuration
  shell: cp /var/www/html/wp-config-sample.php /var/www/html/wp-config.php
  args:
    warn: no

- name: Configure wordpress database, username and password
  replace:
    dest: /var/www/html/wp-config.php
    regexp: "'{{ item.then }}'"
    replace: "'{{ item.now }}'"
    backup: no
  with_items:
    - { then: 'database_name_here', now: 'decision2016' }
    - { then: 'username_here', now: "{{ db_user }}" }
    - { then: 'password_here', now: "{{ db_pass }}" }

- name: Configure wordpress network access using private IP
  replace:
    dest: /var/www/html/wp-config.php
    regexp: "'localhost'"
    replace: "'{{ hostvars.cloud.database.openstack.private_v4 }}'"
    backup: no
  when: hostvars.cloud.database.openstack.private_v4 != ""

- name: Configure wordpress network access using public IP
  replace:
    dest: /var/www/html/wp-config.php
    regexp: "'localhost'"
    replace: "'{{ hostvars.cloud.database.openstack.public_v4 }}'"
    backup: no
  when: hostvars.cloud.database.openstack.private_v4 == ""

- name: Change ownership of wordpress
  shell: chown -R www-data:www-data /var/www/html
  args:
    warn: no
  when: ansible_distribution == 'Ubuntu'

- name: Change ownership of wordpress
  shell: chown -R apache:apache /var/www/html
  args:
    warn: no
  when: ansible_distribution == 'Fedora'

- service: name=apache2 state=restarted enabled=yes
  when: ansible_distribution == 'Ubuntu'

- service: name=httpd state=restarted enabled=yes
  when: ansible_distribution == 'Fedora'

- name: Install wordpress command line tool
  get_url:
    url: "{{ app_env.wp_cli | default('https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar') }}"
    dest: /usr/local/bin/wp
    mode: "a+x"
    force: no

- name: Download a wordpress theme
  get_url:
    url: "{{ app_env.wp_theme }}"
    dest: /tmp/wptheme.zip
    force: yes

- name: Install the theme
  shell: unzip -o -q /tmp/wptheme.zip -d /var/www/html/wp-content/themes
  args:
    warn: no
73
workloads/ansible/shade/lampstack/roles/wordpress/tasks/main.yml
Executable file
@@ -0,0 +1,73 @@
---
- name: Install wordpress
  command: >
    wp core install --path=/var/www/html
    --url="http://{{ hostvars.cloud.balancer.openstack.public_v4 }}"
    --title='OpenStack Interop Challenge'
    --admin_user=wpuser
    --admin_password="{{ db_pass }}"
    --admin_email='interop@openstack.org'
  when: hostvars.cloud.balancer.openstack.public_v4 != ""

- name: Install wordpress
  command: >
    wp core install --path=/var/www/html
    --url="http://{{ hostvars.cloud.balancer.openstack.private_v4 }}"
    --title='OpenStack Interop Challenge'
    --admin_user=wpuser
    --admin_password="{{ db_pass }}"
    --admin_email='interop@openstack.org'
  when: hostvars.cloud.balancer.openstack.public_v4 == ""

- name: Activate wordpress theme
  command: >
    wp --path=/var/www/html theme activate
    "{{ app_env.wp_theme.split('/').pop().split('.')[0] }}"

- name: Download wordpress importer plugin
  get_url:
    url: "{{ app_env.wp_importer | default('http://downloads.wordpress.org/plugin/wordpress-importer.0.6.3.zip') }}"
    dest: "/tmp/wordpress-importer.zip"
    force: "yes"

- name: Install wordpress importer plugin
  command: >
    sudo -u www-data wp --path=/var/www/html plugin install /tmp/wordpress-importer.zip --activate
  args:
    warn: "no"
  when: ansible_distribution == 'Ubuntu'

- name: Install wordpress importer plugin
  command: >
    sudo -u apache /usr/local/bin/wp --path=/var/www/html plugin install /tmp/wordpress-importer.zip
  args:
    warn: "no"
  when: ansible_distribution == 'Fedora'

- name: Enable wordpress importer plugin
  command: >
    sudo -u apache /usr/local/bin/wp --path=/var/www/html plugin activate wordpress-importer
  args:
    warn: "no"
  when: ansible_distribution == 'Fedora'

- name: Download wordpress sample posts
  get_url:
    url: "{{ app_env.wp_posts }}"
    dest: "/tmp/wpposts.zip"
    force: "yes"

- name: Unpack the posts
  command: unzip -o -q /tmp/wpposts.zip -d /tmp/posts
  args:
    warn: "no"

- name: Import wordpress posts
  command: >
    sudo -u www-data wp --path=/var/www/html import /tmp/posts/*.xml --authors=create --quiet
  when: ansible_distribution == 'Ubuntu'

- name: Import wordpress posts
  shell: >
    sudo -u apache /usr/local/bin/wp --path=/var/www/html import /tmp/posts/*.xml --authors=create --quiet
  when: ansible_distribution == 'Fedora'
96
workloads/ansible/shade/lampstack/site.yml
Executable file
@@ -0,0 +1,96 @@
---
- name: provision servers
  hosts: cloud
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - "{{ action }}"

- name: Install python2 for ansible to work
  hosts: dbservers, webservers, balancers, wps
  gather_facts: false
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - common
  environment: "{{ proxy_env }}"

- name: setup database
  hosts: dbservers
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - database
  environment: "{{ proxy_env }}"

- name: setup web servers
  hosts: webservers
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - webserver
  environment: "{{ proxy_env }}"

- name: setup load balancer servers
  hosts: balancers
  user: "{{ app_env.ssh_user }}"
  become: true
  become_user: root
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - balancer
  environment: "{{ proxy_env }}"

- name: install wordpress
  hosts: wps
  user: "{{ app_env.ssh_user }}"
  vars_files:
    - "vars/{{ env }}.yml"
  roles:
    - wordpress
  environment: "{{ proxy_env }}"

- name: clean up resources
  hosts: cloud
  connection: local
  vars_files:
    - "vars/{{ env }}.yml"
  tasks:
    - include: "roles/cleaner/tasks/{{ action }}.yml"
  roles:
    - cleaner
  environment: "{{ proxy_env }}"

- name: Inform the installer
  hosts: cloud
  connection: local
  tasks:
    - debug:
        msg: >-
          Access wordpress at
          http://{{ hostvars.cloud.balancer.openstack.public_v4 }}.
          wordpress userid is wpuser, password is {{ db_pass }}
      when: hostvars.cloud.balancer is defined and
            hostvars.cloud.balancer.openstack.public_v4 != ""
    - debug:
        msg: >-
          Access wordpress at
          http://{{ hostvars.cloud.balancer.openstack.private_v4 }}.
          wordpress userid is wpuser, password is {{ db_pass }}
      when: hostvars.cloud.balancer is defined and
            hostvars.cloud.balancer.openstack.public_v4 == ""
    - debug:
        msg: >-
          The workload test started at {{ hostvars.cloud.starttime.time }},
          ended at {{ ansible_date_time.time }}
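As with the dockerswarm workload, a run is driven entirely by extra vars; something like the following (a sketch; the variable names come from the playbook above, the values are placeholders):

```
# builds the full LAMP stack and prints the wordpress URL at the end
ansible-playbook -e "action=apply env=leap password=<cloud-password>" site.yml
```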
9
workloads/terraform/shade/dockerswarm/.gitignore
vendored
Normal file
@@ -0,0 +1,9 @@
*.tfvars
*.tfstate
*.backup

files/ssl/*.pem
files/ssl/*.csr
files/ssl/*.srl

templates/discovery_url
217
workloads/terraform/shade/dockerswarm/README.md
Normal file
217
workloads/terraform/shade/dockerswarm/README.md
Normal file
@ -0,0 +1,217 @@
# Docker Swarm on OpenStack with Terraform

Provision a Docker Swarm cluster with [Terraform](https://www.terraform.io) on OpenStack.

## Status

This will install a fully HA Docker Swarm cluster on an OpenStack cloud. It is tested on an OpenStack cloud provided by [BlueBox](https://www.blueboxcloud.com/) and should work on most modern installs of OpenStack that support the basic services.

It also supports overlay networks using the `docker network` command; see the documentation below.


## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- Upload a CoreOS image to glance and remember the image name.

## Terraform

Terraform will be used to provision all of the OpenStack resources required to run Docker Swarm. It is also used to deploy and provision the software requirements.

### Prep

#### OpenStack Authentication

Ensure your local ssh-agent is running and your ssh key has been added. This step is required by the Terraform provisioner.

```
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```

Ensure that you have your OpenStack credentials loaded into Terraform environment variables, likely via a command similar to:

```
$ source ~/.stackrc
$ export TF_VAR_username=${OS_USERNAME}
$ export TF_VAR_password=${OS_PASSWORD}
$ export TF_VAR_tenant=${OS_TENANT_NAME}
$ export TF_VAR_auth_url=${OS_AUTH_URL}
```

#### General OpenStack Settings

By default, the security groups allow certain traffic from `0.0.0.0/0`. If you want to restrict this to a specific network, you can set the Terraform variable `whitelist_network`. I like to set it to allow only my current IP:

```
$ export TF_VAR_whitelist_network=$(curl -s icanhazip.com)/32
```

You also want to specify the name of your CoreOS `glance` image as well as the flavor, networks, and keys. Since these do not change often, I like to add them to `terraform.tfvars`:

```
image_name = "coreos-alpha-884-0-0"
network_name = "internal"
net_device = "eth0"
floatingip_pool = "external"
flavor = "m1.medium"
public_key_path = "~/.ssh/id_rsa.pub"
```

_Remove the `*.tfvars` line from `.gitignore` if you wish to save this file into source control._

See `vars-openstack.tf` for the full list of variables you can set.

#### Docker Swarm Settings

You can alter the number of instances to be built and added to the cluster by modifying the `cluster_size` variable (default is 3).

If you have an FQDN you plan on pointing at one or more of the swarm-manager hosts, you can set it via the `fqdn` variable.

Terraform will attempt to run `openssl` commands to create a CA and server/client certificates used to secure the docker/swarm endpoints. If you do not have `openssl` on your local machine or want to re-use existing CA/client certificates, you can set the Terraform variable `generate_ssl` to `0`. The certificates are created in `files/ssl`.
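For example, all three settings can be exported as environment variables before running Terraform (an illustrative shell session; the variables themselves are defined in `vars-swarm.tf`):

```
$ export TF_VAR_cluster_size=5
$ export TF_VAR_fqdn=swarm.example.com
$ export TF_VAR_generate_ssl=0
```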

See `vars-swarm.tf` for the full list of variables you can set.

#### CoreOS Settings

Terraform will attempt to generate an etcd discovery token by running `curl` against the etcd discovery service. If you do not have `curl` or do not wish to generate a new discovery URL, you can set `generate_discovery_url` to `0` and create a file `templates/discovery_url` which contains the discovery URL you wish to use.
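If you disable generation, the file can be created by hand with the same command Terraform would otherwise run (taken from `swarm.tf`, shown here with the default cluster size of 3):

```
$ curl -s 'https://discovery.etcd.io/new?size=3' > templates/discovery_url
```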

## Provision the Docker Swarm

With all your TF vars set, you should be able to run `terraform apply`, but let's check with `terraform plan` that things look correct first:

```
$ terraform plan
Refreshing Terraform state prior to plan...
...
...
+ template_file.discovery_url
    rendered: "" => "<computed>"
    template: "" => "templates/discovery_url"

Plan: 14 to add, 0 to change, 0 to destroy.
```

With no errors showing here, we can go ahead and run:

```
$ terraform apply
...
...
Apply complete! Resources: 14 added, 0 changed, 0 destroyed.

The state of your infrastructure has been saved to the path
below. This state is required to modify and destroy your
infrastructure, so keep it safe. To inspect the complete state
use the `terraform show` command.

State path: terraform.tfstate

Outputs:

swarm_cluster =
Environment Variables for accessing Docker Swarm via floating IP of first host:
export DOCKER_HOST=tcp://x.x.x.x:2375
export DOCKER_TLS_VERIFY=1
export DOCKER_CERT_PATH=/home/bacon/development/personal/terraform-dockerswarm-coreos/files/ssl
```

_The final output uses the floating IP of the first host. You could point at any of the hosts, or use an FQDN with round-robin DNS pointing at all the hosts. I avoided using neutron's load balancing service, as it is not yet standard on OpenStack installs._

## Next Steps

### Check it's up

Copy and paste the above output into your shell and attempt to run `docker info`:

```
$ export DOCKER_HOST=tcp://x.x.x.x:2375
$ export DOCKER_TLS_VERIFY=1
$ export DOCKER_CERT_PATH=/home/bacon/development/personal/terraform-dockerswarm-coreos/files/ssl

$ docker info
Containers: 6
Images: 6
Engine Version:
Role: primary
Strategy: spread
Filters: health, port, dependency, affinity, constraint
Nodes: 3
 swarm-testing-0.novalocal: 10.230.7.171:2376
  └ Status: Healthy
  └ Containers: 2
  └ Reserved CPUs: 0 / 2
  └ Reserved Memory: 0 B / 4.057 GiB
  └ Labels: executiondriver=native-0.2, kernelversion=4.3.0-coreos, operatingsystem=CoreOS 884.0.0, storagedriver=overlay
 swarm-testing-1.novalocal: 10.230.7.172:2376
  └ Status: Healthy
  └ Containers: 2
  └ Reserved CPUs: 0 / 2
  └ Reserved Memory: 0 B / 4.057 GiB
  └ Labels: executiondriver=native-0.2, kernelversion=4.3.0-coreos, operatingsystem=CoreOS 884.0.0, storagedriver=overlay
 swarm-testing-2.novalocal: 10.230.7.173:2376
  └ Status: Healthy
  └ Containers: 2
  └ Reserved CPUs: 0 / 2
  └ Reserved Memory: 0 B / 4.057 GiB
  └ Labels: executiondriver=native-0.2, kernelversion=4.3.0-coreos, operatingsystem=CoreOS 884.0.0, storagedriver=overlay
CPUs: 6
Total Memory: 12.17 GiB
Name: swarm-testing-0.novalocal
```

### Create an overlay network and run a container

Create an overlay network called `my-net`:

```
$ docker network create --driver overlay my-net
ecfefdff938f506b09c5ea5b505ee8ace0ee7297d9d617d06b9bbaac5bf10fea
$ docker network ls
NETWORK ID          NAME                               DRIVER
38338f0ec63a        swarm-testing-1.novalocal/host     host
c41436d91f29        swarm-testing-0.novalocal/none     null
e29c4451483f        swarm-testing-0.novalocal/bridge   bridge
400130ea105b        swarm-testing-2.novalocal/none     null
c8f15676b2a5        swarm-testing-2.novalocal/host     host
493127ad6577        swarm-testing-2.novalocal/bridge   bridge
74f862f34921        swarm-testing-1.novalocal/none     null
ecfefdff938f        my-net                             overlay
b09a38662087        swarm-testing-0.novalocal/host     host
cfbcfbd7de02        swarm-testing-1.novalocal/bridge   bridge
```

Run a container on the network on a specific host, then try to access it from another:

```
$ docker run -itd --name=web --net=my-net --env="constraint:node==swarm-testing-1.novalocal" nginx
53166b97adf2397403f00a2ffcdba635a7f08852c5fe4f452d6ca8c6f40bb80c
$ docker run -it --rm --net=my-net --env="constraint:node==swarm-testing-2.novalocal" busybox wget -O- http://web
Connecting to web (10.0.0.2:80)
<!DOCTYPE html>
<html>
...
...
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
```

## Cleanup

Once you're done with it, don't forget to nuke the whole thing.

```
$ terraform destroy
Do you really want to destroy?
  Terraform will delete all your managed infrastructure.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes
...
...
Apply complete! Resources: 0 added, 0 changed, 14 destroyed.
```
60
workloads/terraform/shade/dockerswarm/_securitygroups.tf
Normal file
@ -0,0 +1,60 @@
resource "openstack_compute_secgroup_v2" "swarm_base" {
  name = "${var.cluster_name}_swarm_base"
  description = "${var.cluster_name} - Docker Swarm Security Group"
  # SSH
  rule {
    ip_protocol = "tcp"
    from_port = "22"
    to_port = "22"
    cidr = "${var.whitelist_network}"
  }
  # DOCKER SWARM
  rule {
    ip_protocol = "tcp"
    from_port = "2375"
    to_port = "2375"
    cidr = "${var.whitelist_network}"
  }
  # DOCKER
  rule {
    ip_protocol = "tcp"
    from_port = "2376"
    to_port = "2376"
    cidr = "${var.whitelist_network}"
  }
  # INTERNAL Communication only
  rule {
    ip_protocol = "icmp"
    from_port = "-1"
    to_port = "-1"
    self = true
  }
  rule {
    ip_protocol = "tcp"
    from_port = "1"
    to_port = "65535"
    self = true
  }
  rule {
    ip_protocol = "udp"
    from_port = "1"
    to_port = "65535"
    self = true
  }

  # DANGER DANGER DANGER
  # Uncomment these if you want to allow
  # unrestricted inbound access
  #rule {
  #  ip_protocol = "tcp"
  #  from_port = "1"
  #  to_port = "65535"
  #  cidr = "${var.whitelist_network}"
  #}
  #rule {
  #  ip_protocol = "udp"
  #  from_port = "1"
  #  to_port = "65535"
  #  cidr = "${var.whitelist_network}"
  #}
}
12
workloads/terraform/shade/dockerswarm/files/ssl/generate-ssl.sh
Normal file
@ -0,0 +1,12 @@
#!/bin/bash

openssl genrsa -out files/ssl/ca-key.pem 2048

openssl req -x509 -new -nodes -key files/ssl/ca-key.pem -days 10000 -out files/ssl/ca.pem -subj '/CN=docker-CA'

openssl genrsa -out files/ssl/key.pem 2048

openssl req -new -key files/ssl/key.pem -out files/ssl/cert.csr -subj '/CN=docker-client' -config files/ssl/openssl.cnf

openssl x509 -req -in files/ssl/cert.csr -CA files/ssl/ca.pem -CAkey files/ssl/ca-key.pem \
  -CAcreateserial -out files/ssl/cert.pem -days 365 -extensions v3_req -extfile files/ssl/openssl.cnf
8
workloads/terraform/shade/dockerswarm/files/ssl/openssl.cnf
Normal file
@ -0,0 +1,8 @@
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
109
workloads/terraform/shade/dockerswarm/swarm.tf
Executable file
@ -0,0 +1,109 @@
resource "null_resource" "discovery_url_template" {
  count = "${var.generate_discovery_url}"
  provisioner "local-exec" {
    command = "curl -s 'https://discovery.etcd.io/new?size=${var.cluster_size}' > templates/discovery_url"
  }
}

resource "null_resource" "generate_ssl" {
  count = "${var.generate_ssl}"
  provisioner "local-exec" {
    command = "bash files/ssl/generate-ssl.sh"
  }
}

resource "template_file" "discovery_url" {
  template = "templates/discovery_url"
  depends_on = [
    "null_resource.discovery_url_template"
  ]
}

resource "template_file" "cloud_init" {
  template = "templates/cloud-init"
  vars {
    cluster_token = "${var.cluster_name}"
    discovery_url = "${template_file.discovery_url.rendered}"
    swarm_version = "${var.swarm_version}"
  }
}

resource "template_file" "10_docker_service" {
  template = "templates/10-docker-service.conf"
  vars {
    net_device = "${ var.net_device }"
  }
}

resource "openstack_networking_floatingip_v2" "coreos" {
  count = "${var.cluster_size}"
  pool = "${var.floatingip_pool}"
}

resource "openstack_compute_keypair_v2" "coreos" {
  name = "swarm-${var.cluster_name}"
  public_key = "${file(var.public_key_path)}"
}

resource "openstack_compute_instance_v2" "coreos" {
  name = "swarm-${var.cluster_name}-${count.index}"
  count = "${var.cluster_size}"
  image_name = "${var.image_name}"
  flavor_name = "${var.flavor}"
  key_pair = "${openstack_compute_keypair_v2.coreos.name}"
  network {
    name = "${var.network_name}"
  }
  security_groups = [
    "${openstack_compute_secgroup_v2.swarm_base.name}"
  ]
  floating_ip = "${element(openstack_networking_floatingip_v2.coreos.*.address, count.index)}"
  user_data = "${template_file.cloud_init.rendered}"
  provisioner "file" {
    source = "files"
    destination = "/tmp/files"
    connection {
      user = "core"
    }
  }
  provisioner "remote-exec" {
    inline = [
      # Create TLS certs
      "mkdir -p /home/core/.docker",
      "cp /tmp/files/ssl/ca.pem /home/core/.docker/",
      "cp /tmp/files/ssl/cert.pem /home/core/.docker/",
      "cp /tmp/files/ssl/key.pem /home/core/.docker/",
      "echo 'subjectAltName = @alt_names' >> /tmp/files/ssl/openssl.cnf",
      "echo '[alt_names]' >> /tmp/files/ssl/openssl.cnf",
      "echo 'IP.1 = ${self.network.0.fixed_ip_v4}' >> /tmp/files/ssl/openssl.cnf",
      "echo 'IP.2 = ${element(openstack_networking_floatingip_v2.coreos.*.address, count.index)}' >> /tmp/files/ssl/openssl.cnf",
      "echo 'DNS.1 = ${var.fqdn}' >> /tmp/files/ssl/openssl.cnf",
      "echo 'DNS.2 = ${element(openstack_networking_floatingip_v2.coreos.*.address, count.index)}.xip.io' >> /tmp/files/ssl/openssl.cnf",
      "openssl req -new -key /tmp/files/ssl/key.pem -out /tmp/files/ssl/cert.csr -subj '/CN=docker-client' -config /tmp/files/ssl/openssl.cnf",
      "openssl x509 -req -in /tmp/files/ssl/cert.csr -CA /tmp/files/ssl/ca.pem -CAkey /tmp/files/ssl/ca-key.pem \\",
      "-CAcreateserial -out /tmp/files/ssl/cert.pem -days 365 -extensions v3_req -extfile /tmp/files/ssl/openssl.cnf",
      "sudo mkdir -p /etc/docker/ssl",
      "sudo cp /tmp/files/ssl/ca.pem /etc/docker/ssl/",
      "sudo cp /tmp/files/ssl/cert.pem /etc/docker/ssl/",
      "sudo cp /tmp/files/ssl/key.pem /etc/docker/ssl/",
      # Apply localized settings to services
      "sudo mkdir -p /etc/systemd/system/{docker,swarm-agent,swarm-manager}.service.d",
      "cat <<'EOF' > /tmp/10-docker-service.conf\n${template_file.10_docker_service.rendered}\nEOF",
      "sudo mv /tmp/10-docker-service.conf /etc/systemd/system/docker.service.d/",
      "sudo systemctl daemon-reload",
      "sudo systemctl restart docker.service",
      "sudo systemctl start swarm-agent.service",
      "sudo systemctl start swarm-manager.service",
    ]
    connection {
      user = "core"
    }
  }
  depends_on = [
    "template_file.cloud_init"
  ]
}

output "swarm_cluster" {
  value = "\nEnvironment Variables for accessing Docker Swarm via floating IP of first host:\nexport DOCKER_HOST=tcp://${openstack_networking_floatingip_v2.coreos.0.address}:2375\nexport DOCKER_TLS_VERIFY=1\nexport DOCKER_CERT_PATH=${path.module}/files/ssl"
}
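If you ever lose the connection details printed by this output, `terraform output` will re-print them from the saved state (standard Terraform CLI behavior, not specific to this repo):

```
$ terraform output swarm_cluster
```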
2
workloads/terraform/shade/dockerswarm/templates/10-docker-service.conf
Executable file
@ -0,0 +1,2 @@
[Service]
Environment="DOCKER_OPTS=-H=0.0.0.0:2376 -H unix:///var/run/docker.sock --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem --cluster-advertise ${net_device}:2376 --cluster-store etcd://127.0.0.1:2379/docker"
48
workloads/terraform/shade/dockerswarm/templates/cloud-init
Executable file
@ -0,0 +1,48 @@
#cloud-config

coreos:
  units:
    - name: etcd.service
      mask: true
    - name: etcd2.service
      command: start
    - name: docker.service
      command: start
    - name: swarm-agent.service
      content: |
        [Unit]
        Description=swarm agent
        Requires=docker.service
        After=docker.service

        [Service]
        EnvironmentFile=/etc/environment
        TimeoutStartSec=20m
        ExecStartPre=/usr/bin/docker pull swarm:${swarm_version}
        ExecStartPre=-/usr/bin/docker rm -f swarm-agent
        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-agent swarm:${swarm_version} join --addr=$COREOS_PRIVATE_IPV4:2376 etcd://$COREOS_PRIVATE_IPV4:2379/docker"
        ExecStop=/usr/bin/docker stop swarm-agent
    - name: swarm-manager.service
      content: |
        [Unit]
        Description=swarm manager
        Requires=docker.service
        After=docker.service

        [Service]
        EnvironmentFile=/etc/environment
        TimeoutStartSec=20m
        ExecStartPre=/usr/bin/docker pull swarm:${swarm_version}
        ExecStartPre=-/usr/bin/docker rm -f swarm-manager
        ExecStart=/bin/sh -c "/usr/bin/docker run --rm --name swarm-manager -v /etc/docker/ssl:/etc/docker/ssl --net=host swarm:${swarm_version} manage --tlsverify --tlscacert=/etc/docker/ssl/ca.pem --tlscert=/etc/docker/ssl/cert.pem --tlskey=/etc/docker/ssl/key.pem etcd://$COREOS_PRIVATE_IPV4:2379/docker"
        ExecStop=/usr/bin/docker stop swarm-manager
  etcd2:
    discovery: ${discovery_url}
    advertise-client-urls: http://$private_ipv4:2379
    initial-advertise-peer-urls: http://$private_ipv4:2380
    listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
    listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
    data-dir: /var/lib/etcd2
    initial-cluster-token: ${cluster_token}
  update:
    reboot-strategy: "off"
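To verify that cloud-init brought the swarm units up on a node, you can check them over SSH (a hypothetical session; substitute one of the floating IPs for `x.x.x.x`):

```
$ ssh core@x.x.x.x 'systemctl status swarm-agent swarm-manager --no-pager'
```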
4
workloads/terraform/shade/dockerswarm/vars-coreos.tf
Normal file
@ -0,0 +1,4 @@
variable "generate_discovery_url" {
  default = 1
  description = "set to 0 if you do not want to autogenerate the discovery url"
}
46
workloads/terraform/shade/dockerswarm/vars-openstack.tf
Normal file
@ -0,0 +1,46 @@
variable "image_name" {
  default = "coreos"
}

variable "network_name" {
  default = "internal"
}

variable "floatingip_pool" {
  default = "external"
}

variable "net_device" {
  description = "Network interface device in the system"
  default = "eth0"
}

variable "flavor" {
  default = "m1.medium"
}

variable "username" {
  description = "Your openstack username"
}

variable "password" {
  description = "Your openstack password"
}

variable "tenant" {
  description = "Your openstack tenant/project"
}

variable "auth_url" {
  description = "Your openstack auth URL"
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default = "~/.ssh/id_rsa.pub"
}

variable "whitelist_network" {
  description = "network to allow connectivity from"
  default = "0.0.0.0/0"
}
21
workloads/terraform/shade/dockerswarm/vars-swarm.tf
Normal file
@ -0,0 +1,21 @@
variable "cluster_size" {
  default = 3
}

variable "cluster_name" {
  default = "testing"
}

variable "swarm_version" {
  default = "latest"
}

variable "generate_ssl" {
  description = "set to 0 if you want to reuse ssl certs"
  default = 1
}

variable "fqdn" {
  description = "Fully Qualified DNS to add to TLS certs"
  default = "swarm.example.com"
}
162
workloads/terraform/shade/lampstack/README.md
Executable file
@ -0,0 +1,162 @@
# LAMPstack Terraform deployments

## Status

This will install a 3-node LAMP stack by default. Two nodes will be used as
web servers and one node will be used as the database node.

Once the script finishes, a set of URLs will be displayed at the end for
verification.

## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- Make sure there is an Ubuntu image available on your cloud.

## Terraform

Terraform will be used to provision all of the OpenStack resources required by
the LAMP stack and all required software.

### Prep

#### Deal with ssh keys for OpenStack Authentication

Ensure your local ssh-agent is running and your ssh key has been added.
This step is required by the Terraform provisioner. Otherwise, you will have
to use a key pair without a passphrase.

```
eval $(ssh-agent -s)
ssh-add ~/.ssh/id_rsa
```

#### General OpenStack Settings

The Terraform OpenStack provider requires a few environment variables to be set
before you can run the scripts. In general, you can simply export the OS
environment variables like the following:

```
export OS_REGION_NAME=RegionOne
export OS_PROJECT_NAME=demo
export OS_IDENTITY_API_VERSION=3
export OS_PASSWORD=secret
export OS_DOMAIN_NAME=default
export OS_USERNAME=demo
export OS_TENANT_NAME=demo
export OS_PROJECT_DOMAIN_NAME=default
export OS_AUTH_URL=http://9.30.217.9:5000/v3
```

The values of these variables should be provided by your cloud provider. When
using the keystone v2.0 API, you will not need to set up the domain name.
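For instance, a minimal v2.0-style environment would omit the domain variables entirely (illustrative values only; use the endpoint and credentials from your provider):

```
export OS_AUTH_URL=http://9.30.217.9:5000/v2.0
export OS_REGION_NAME=RegionOne
export OS_USERNAME=demo
export OS_PASSWORD=secret
export OS_TENANT_NAME=demo
```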

#### LAMP Stack Settings

You will most likely need to specify the name of your Ubuntu `glance` image,
flavor, LAMP stack size (how many nodes in the stack), private and public
network names, and keys. Here is the list of the default values defined in the
file `vars_lampstack.tf`:

```
image_name = "ubuntu-14.04"
private_net = "internal"
public_net = "external"
flavor = "m1.medium"
public_key_path = "~/.ssh/id_rsa.pub"
stack_size = 3
db_username = dbuser
db_password = dbpass
```

You can change the settings in the file, or you can simply set environment
variables like the following:

```
export TF_VAR_image_name="trusty 1404"
export TF_VAR_private_net=Bluebox
export TF_VAR_public_net=internet
export TF_VAR_flavor="m1.small"
export TF_VAR_public_key_path="~/.ssh/id_rsa.pub"
export TF_VAR_stack_size=5
export TF_VAR_db_username=george
export TF_VAR_db_password=secret
```

## Provision the LAMP stack

With all your OpenStack and TF vars set, you should be able to run
`terraform apply`. But let's check with `terraform plan` that things look
correct first:

```
$ terraform plan
Refreshing Terraform state prior to plan...
...
...
+ openstack_networking_floatingip_v2.database
    address:   "<computed>"
    fixed_ip:  "<computed>"
    pool:      "internet"
    port_id:   "<computed>"
    region:    "RegionOne"
    tenant_id: "<computed>"

Plan: 8 to add, 0 to change, 0 to destroy.
```

If there are no errors showing, we can go ahead and run:

```
$ terraform apply
...
...
Outputs:

lampstack = Success!!!

Access service at the following URLs:
http://99.30.217.44
http://99.30.217.42
```

The above results show that the LAMP stack provisioned correctly and that the
LAMP application is up and running, reachable at either of the URLs.


## Next Steps

### Check it's up

Use the access URLs to reach the application. Since there are multiple web
server nodes, any of the URLs should work.

```
$ curl http://99.30.217.44
$ curl http://99.30.217.42
```

## Cleanup

Once you're done with it, don't forget to nuke the whole thing.

```
$ terraform destroy
Do you really want to destroy?
  Terraform will delete all your managed infrastructure.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes
...
...
Apply complete! Resources: 0 added, 0 changed, 8 destroyed.
```
108
workloads/terraform/shade/lampstack/lampstack.tf
Executable file
@ -0,0 +1,108 @@
# The terraform to stand up LAMP stack

resource "openstack_compute_keypair_v2" "lampstack_key" {
  name = "lampstack_key"
  public_key = "${file(var.public_key_path)}"
}

resource "openstack_compute_secgroup_v2" "lampstack_sg" {
  name = "lampstack_sg"
  description = "lampstack security group"
  rule {
    from_port = 22
    to_port = 22
    ip_protocol = "tcp"
    cidr = "0.0.0.0/0"
  }
  rule {
    from_port = 80
    to_port = 80
    ip_protocol = "tcp"
    cidr = "0.0.0.0/0"
  }
  rule {
    from_port = 3306
    to_port = 3306
    ip_protocol = "tcp"
    cidr = "0.0.0.0/0"
  }
}

resource "openstack_networking_floatingip_v2" "database" {
  count = 1
  pool = "${var.public_net}"
}

resource "openstack_compute_instance_v2" "database" {
  name = "database"
  image_name = "${var.image_name}"
  flavor_name = "${var.flavor}"
  key_pair = "lampstack_key"
  security_groups = ["${openstack_compute_secgroup_v2.lampstack_sg.name}"]
  network {
    name = "${var.private_net}"
  }
  floating_ip = "${openstack_networking_floatingip_v2.database.0.address}"

  connection {
    user = "ubuntu"
    timeout = "30s"
  }

  provisioner "file" {
    source = "onvm"
    destination = "/tmp/onvm"
  }

  provisioner "remote-exec" {
    inline = [
      "echo ${self.network.0.fixed_ip_v4} database > /tmp/onvm/hostname",
      "chmod +x /tmp/onvm/scripts/*",
      "/tmp/onvm/scripts/installdb.sh ${var.db_username} ${var.db_password}"
    ]
  }
}

resource "openstack_networking_floatingip_v2" "apache" {
  count = "${var.stack_size - 1}"
  pool = "${var.public_net}"
}

resource "openstack_compute_instance_v2" "apache" {
  name = "apache_${count.index}"
  count = "${var.stack_size - 1}"
  image_name = "${var.image_name}"
  flavor_name = "${var.flavor}"
  key_pair = "lampstack_key"
  security_groups = ["${openstack_compute_secgroup_v2.lampstack_sg.name}"]
  network {
    name = "${var.private_net}"
  }
  floating_ip = "${element(openstack_networking_floatingip_v2.apache.*.address, count.index)}"

  depends_on = [ "openstack_compute_instance_v2.database" ]

  connection {
    user = "ubuntu"
    timeout = "30s"
  }

  provisioner "file" {
    source = "onvm"
    destination = "/tmp/onvm"
  }

  provisioner "remote-exec" {
    inline = [
      "echo ${openstack_compute_instance_v2.database.network.0.fixed_ip_v4} database > /tmp/onvm/hostname",
      "echo ${self.network.0.fixed_ip_v4} apache-${count.index} >> /tmp/onvm/hostname",
      "chmod +x /tmp/onvm/scripts/*",
      "/tmp/onvm/scripts/installapache.sh ${var.db_username} ${var.db_password}"
    ]
  }

}

output "lampstack" {
  value = "Success!!!\n\nAccess service at the following URLs:\nhttp://${join("\nhttp://",openstack_compute_instance_v2.apache.*.floating_ip)}"
}
15
workloads/terraform/shade/lampstack/onvm/app/index.php
Executable file
@ -0,0 +1,15 @@
<?php
$servername = "database";
$username = "TTTFFFdbuser";
$password = "TTTFFFdbpass";
$dbname = "decision2016";

try {
    $conn = new PDO("mysql:host=$servername;dbname=$dbname", $username, $password);
    $conn->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
    echo "Connected successfully";
}
catch(PDOException $e) {
    echo "Connection failed: " . $e->getMessage();
}
?>
19
workloads/terraform/shade/lampstack/onvm/scripts/installapache.sh
Executable file
@ -0,0 +1,19 @@
#!/usr/bin/env bash
# $1 db_username
# $2 db_password

cat /tmp/onvm/hostname | sudo tee -a /etc/hosts >/dev/null
echo 'Installing apache2 and php 5...'
sudo apt-get -qqy update
sudo apt-get -qqy install apache2 php5 php5-mysql
echo 'ServerName localhost' | sudo tee -a /etc/apache2/apache2.conf >/dev/null

sudo mv /tmp/onvm/app/* /var/www/html
sudo chown -R www-data:www-data /var/www/html
sudo rm -r -f /var/www/html/index.html

cmdStr=$(echo "s/TTTFFFdbuser/$1/g")
sudo sed -i -e "${cmdStr}" /var/www/html/index.php

cmdStr=$(echo "s/TTTFFFdbpass/$2/g")
sudo sed -i -e "${cmdStr}" /var/www/html/index.php
21
workloads/terraform/shade/lampstack/onvm/scripts/installdb.sh
Executable file
@ -0,0 +1,21 @@
#!/usr/bin/env bash
# $1 db_username
# $2 db_password

cat /tmp/onvm/hostname | sudo tee -a /etc/hosts >/dev/null
pw=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 10 | head -n 1)
sudo apt-get -qqy update
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password $pw"
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $pw"
sudo apt-get -qqy install mysql-server
echo 'Creating a database...'

mysql -uroot -p$pw -e "CREATE DATABASE decision2016;"
mysql -uroot -p$pw -e "use decision2016; GRANT ALL PRIVILEGES ON decision2016.* TO '$1'@'localhost' IDENTIFIED BY '$2';"
mysql -uroot -p$pw -e "use decision2016; GRANT ALL PRIVILEGES ON decision2016.* TO '$1'@'%' IDENTIFIED BY '$2';"
mysql -uroot -p$pw -e "flush privileges"

cmdStr=$(echo 's/127.0.0.1/database/g')
sudo sed -i -e "${cmdStr}" /etc/mysql/my.cnf

sudo service mysql restart
34
workloads/terraform/shade/lampstack/vars_lampstack.tf
Executable file
@ -0,0 +1,34 @@
variable "image_name" {
  default = "ubuntu-14.04"
}

variable "private_net" {
  default = "internal"
}

variable "public_net" {
  default = "external"
}

variable "flavor" {
  default = "m1.medium"
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default = "~/.ssh/id_rsa.pub"
}

variable "stack_size" {
  default = 3
}

variable "db_username" {
  description = "The lamp stack database user for remote access"
  default = "dbuser"
}

variable "db_password" {
  description = "The lamp stack database user password for remote access"
  default = "dbpass"
}