Move all examples to examples dir
parent 0fd4f22372
commit 72780885e1
109
examples/bootstrap/example-bootstrap.py
Normal file
@@ -0,0 +1,109 @@
import click
import sys
import time

from solar.core import actions
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar import errors

from solar.interfaces.db import get_db


GIT_PUPPET_LIBS_URL = 'https://github.com/CGenie/puppet-libs-resource'


# TODO
# Resource for repository OR puppet apt-module in run.pp
# add-apt-repository cloud-archive:juno
# To discuss: install stuff in Docker container

# NOTE
# No copy of manifests, pull from upstream (implemented in the puppet handler)
# Official puppet manifests, not fuel-library


db = get_db()


@click.group()
def main():
    pass


def setup_resources():
    db.clear()

    signals.Connections.clear()

    node3 = vr.create('node3', 'resources/ro_node/', {
        'ip': '10.0.0.5',
        'ssh_key': '/vagrant/.vagrant/machines/solar-dev3/virtualbox/private_key',
        'ssh_user': 'vagrant'
    })[0]

    solar_bootstrap3 = vr.create('solar_bootstrap3', 'resources/solar_bootstrap', {'master_ip': '10.0.0.2'})[0]

    signals.connect(node3, solar_bootstrap3)

    has_errors = False
    for r in locals().values():
        if not isinstance(r, resource.Resource):
            continue

        print 'Validating {}'.format(r.name)
        errors = validation.validate_resource(r)
        if errors:
            has_errors = True
            print 'ERROR: %s: %s' % (r.name, errors)

    if has_errors:
        sys.exit(1)

resources_to_run = [
    'solar_bootstrap3',
]


@click.command()
def deploy():
    setup_resources()

    # run
    resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
    resources = {r.name: r for r in resources}

    for name in resources_to_run:
        try:
            actions.resource_action(resources[name], 'run')
        except errors.SolarError as e:
            print 'WARNING: %s' % str(e)
            raise

    time.sleep(10)


@click.command()
def undeploy():
    resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
    resources = {r.name: r for r in resources}

    for name in reversed(resources_to_run):
        try:
            actions.resource_action(resources[name], 'remove')
        except errors.SolarError as e:
            print 'WARNING: %s' % str(e)

    db.clear()

    signals.Connections.clear()


main.add_command(deploy)
main.add_command(undeploy)


if __name__ == '__main__':
    main()
61
examples/cli/example.sh
Executable file
@@ -0,0 +1,61 @@
#!/bin/bash
set -eux

function clean_local {
    rm -rf /tmp/tmp*
    rm /tmp/storage/* || true
    rm /tmp/connections.yaml || true

    mkdir -p /tmp/state

    echo > /tmp/state/commit_log || true
    echo > /tmp/state/commited_data || true
    echo > /tmp/state/stage_log || true
    find /vagrant/solar/solar -name '*.pyc' -delete || true

    sudo docker stop $(sudo docker ps -q) || true
    sudo docker rm $(sudo docker ps -qa) || true
}


function start {
    solar profile -c -t env/test_env -i prf1
    solar discover

    solar assign -n 'node/node_2 | node/node_1' -r 'resources/docker'
    solar assign -n 'node/node_1' -r 'resources/mariadb'
    solar assign -n 'node/node_1' -r 'resources/keystone'
    solar assign -n 'node/node_1' -r 'resources/haproxy'
    solar assign -n 'node/node_1' -r 'resources/rabbitmq'

    solar connect --profile prf1

    ./cli.py changes stage
    ./cli.py changes commit
}


function scaleup {
    solar assign -n 'node/node_2' -r 'resource/keystone_config'
    solar assign -n 'node/node_2' -r 'resource/keystone_service'

    solar connect --profile prf1

    ./cli.py changes stage
    ./cli.py changes commit
}


function clean {
    solar run -a remove -t 'resource/mariadb_service' || true
    solar run -a remove -t 'resource/keystone_service' || true
    solar run -a remove -t 'resource/haproxy_service' || true
    solar run -a remove -t 'resource/rabbitmq_service' || true
}

function clean_all {
    clean
    clean_local
}

$1
146
examples/lxc/example-lxc.py
Normal file
@@ -0,0 +1,146 @@
#!/usr/bin/env python

# To run:
# example-lxc.py deploy
# solar changes stage
# solar changes process
# solar orch run-once last
# watch 'solar orch report last'

import click

from solar.core import signals
from solar.core.resource import virtual_resource as vr

from solar.interfaces.db import get_db

from solar.system_log import change
from solar.cli import orch

@click.group()
def main():
    pass


def lxc_template(idx):
    return {
        'user': 'root',
        'mgmt_ip': '172.18.11.{}'.format(idx),
        'container_name': 'test{}'.format(idx),
        'inventory_hostname': 'test{}'.format(idx),
        'properties':
            {'container_release': 'trusty'},
        'container_networks':
            {'mgmt': {
                'address': '172.18.11.{}'.format(idx),  # address for container
                'bridge': 'br-int53',  # bridge to attach veth pair
                'bridge_address': '172.18.11.253/24',
                'interface': 'eth1',  # interface name in container
                'netmask': '255.255.255.0',
                'type': 'veth'}}
    }


@click.command()
def deploy():
    db = get_db()
    db.clear()
    signals.Connections.clear()

    node1 = vr.create('nodes', 'templates/nodes.yml', {})[0]
    seed = vr.create('nodes', 'templates/seed_node.yml', {})[0]

    ssh_key = vr.create('ssh_key1', 'resources/ssh_key', {
        'keys_dir': '/vagrant/.ssh',
        'private_key': '/vagrant/.ssh/id_rsa',
        'public_key': '/vagrant/.ssh/id_rsa.pub',
        'passphrase': '',
    })[0]
    signals.connect(seed, ssh_key)

    cnets1 = vr.create('cnets1', 'resources/container_networks', {
        'networks':
            {'mgmt': {
                'bridge': 'br-int53',
                'bridge_address': '172.18.11.254/24'
            }}
    })[0]
    cnets2 = vr.create('cnets2', 'resources/container_networks', {
        'networks':
            {'mgmt': {
                'bridge': 'br-int53',
                'bridge_address': '172.18.11.253/24'
            }}
    })[0]
    signals.connect(seed, cnets1)
    signals.connect(node1, cnets2)

    vxlan_mesh1 = vr.create('vxlan_mesh1', 'resources/vxlan_mesh', {
        'id': 53,
        'parent': 'eth1',
        'master': 'br-int53'
    })[0]
    vxlan_mesh2 = vr.create('vxlan_mesh2', 'resources/vxlan_mesh', {
        'id': 53,
        'parent': 'eth1',
        'master': 'br-int53'
    })[0]
    # seed node should be connected anyway, because we need to be able to ssh
    # into containers from any node
    signals.connect(seed, vxlan_mesh1)
    signals.connect(node1, vxlan_mesh2)

    lxc_infra1 = vr.create('lxc_infra1', 'resources/lxc_host', {})[0]
    signals.connect(node1, lxc_infra1)

    lxc_hosts = range(28, 35)
    hosts_map = {}
    for idx in lxc_hosts:

        lxc_host_idx = vr.create(
            'lxc_host{}'.format(idx),
            'resources/lxc_container', lxc_template(idx))[0]
        hosts_map[idx] = lxc_host_idx

        signals.connect(node1, lxc_host_idx, {
            'ip': ['ansible_ssh_host', 'physical_host'],
        })
        # this is required to introduce a depends-on relationship between lxc infra
        # and lxc container
        signals.connect(lxc_infra1, lxc_host_idx, {'provides': 'requires'})
        signals.connect(cnets2, lxc_host_idx)
        signals.connect(ssh_key, lxc_host_idx, {
            'public_key': 'pub_key',
            'private_key': 'user_key'})

    # RABBIT
    rabbitmq_service1 = vr.create('rabbitmq_service1', 'resources/rabbitmq_service/', {
        'management_port': 15672,
        'port': 5672,
    })[0]
    openstack_vhost = vr.create('openstack_vhost', 'resources/rabbitmq_vhost/', {
        'vhost_name': 'openstack'
    })[0]

    openstack_rabbitmq_user = vr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', {
        'user_name': 'openstack',
        'password': 'openstack_password'
    })[0]

    signals.connect(hosts_map[28], rabbitmq_service1, {
        'mgmt_ip': 'ip',
        'user_key': 'ssh_key',
        'user': 'ssh_user'})
    signals.connect(rabbitmq_service1, openstack_vhost)
    signals.connect(rabbitmq_service1, openstack_rabbitmq_user)
    signals.connect(openstack_vhost, openstack_rabbitmq_user, {
        'vhost_name',
    })

    print change.send_to_orchestration()

main.add_command(deploy)


if __name__ == '__main__':
    main()