Merge branch 'master' into dry-run
Commit f110e7d1a5

5 .gitignore (vendored)
@@ -17,9 +17,12 @@ rs/
solar.log
x-venv/
.tmp/
modules/

celery*.pid
celery*.log

*.dot
*.png
*.png
resources_compiled.py
100 README.md
@@ -79,28 +79,30 @@ solar connections show
solar connections graph
```

You can also limit the graph to show only specific resources:

```
solar connections graph --start-with mariadb_service --end-with keystone_db
```

* You can verify that all input values are correct and mapped, without duplicating your values, with this command:

```
solar resource validate
```

* Disconnect resources:

```
solar disconnect mariadb_service node1
```

* Tag a resource:

```
solar resource tag node1 test-tags
# Remove tags
solar resource tag node1 test-tag --delete
```

# Low level API

## HAProxy deployment (not maintained)

```
cd /vagrant
python cli.py deploy haproxy_deployment/haproxy-deployment.yaml
```

or from Python shell:

```
from x import deployment

deployment.deploy('/vagrant/haproxy_deployment/haproxy-deployment.yaml')
```

## Usage:

Creating resources:
@@ -188,42 +190,42 @@ instead, just use the `>` operator when specifying hash:
solar resource action keystone_puppet run -d -m "{\"73c>\": \"./Puppetlabs-file\"}"
```

## CLI
## Resource compiling

You can do the above from the command-line client:
You can compile all `meta.yaml` definitions into Python code with classes that
derive from `Resource`. To do this run

```
solar resource compile_all
```

This generates the file `resources_compiled.py` in the main directory (do not commit
this file into the repo). Then you can import classes from that file, create
their instances and assign values just as if they were normal properties.
If your editor supports Python static checking, you will have autocompletion
there too. An example of how to create a node this way:

```
import resources_compiled

node1 = resources_compiled.RoNodeResource('node1', None, {})
node1.ip = '10.0.0.3'
node1.ssh_key = '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key'
node1.ssh_user = 'vagrant'
```

## HAProxy deployment (not maintained)

```
cd /vagrant

python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'

python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'

python cli.py resource create mariadb_keystone_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'

python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'

# View resources
python cli.py resource show rs/mariadb_keystone_data
# Show all resources at location rs/
python cli.py resource show rs/ --all

# Show resources with specific tags
python cli.py resources show rs/ --tag test

# Connect resources
python cli.py connect rs/node2 rs/mariadb_keystone_data
python cli.py connect rs/node1 rs/mariadb_nova_data
# Test update
python cli.py update rs/node2 '{"ip": "1.1.1.1"}'
python cli.py resource show rs/mariadb_keystone_data  # --> IP is 1.1.1.1

# View connections
python cli.py connections show

# Outputs the graph to the 'graph.png' file; note that arrows don't have "normal" pointers, the connecting line is just thicker
# please see http://networkx.lanl.gov/_modules/networkx/drawing/nx_pylab.html
python cli.py connections graph

# Disconnect
python cli.py disconnect rs/mariadb_nova_data rs/node1

# Tag a resource:
python cli.py resource tag rs/node1 test-tags
# Remove tags
python cli.py resource tag rs/node1 test-tag --delete
solar deploy haproxy_deployment/haproxy-deployment.yaml
```

or from Python shell:

```
from solar.core import deployment

deployment.deploy('/vagrant/haproxy_deployment/haproxy-deployment.yaml')
```

@@ -13,6 +13,7 @@ template-dir: /vagrant/templates

resources-directory: /tmp/git
resources-files-mask: /vagrant/resources/*/*.yaml
resources-compiled-file: /vagrant/resources_compiled.py
node_resource_template: /vagrant/resources/ro_node/

state: /tmp/state/
331 example-compiled-resources.py (new file)
@@ -0,0 +1,331 @@
"""
To run this code, first compile the resources with

    solar resource compile_all
"""

import click
import json
import requests
import sys
import time

from solar.core import actions
from solar.core.resource import virtual_resource as vr
from solar.core import resource
from solar.core import signals

from solar.interfaces.db import get_db
from solar.core.resource_provider import GitProvider, RemoteZipProvider


GIT_KEYSTONE_RESOURCE_URL = 'https://github.com/loles/keystone-resource'
ZIP_KEYSTONE_RESOURCE_URL = 'https://github.com/loles/keystone-resource/archive/master.zip'

import resources_compiled


@click.group()
def main():
    pass
|
||||
|
||||
|
||||
@click.command()
|
||||
def deploy():
|
||||
db = get_db()
|
||||
db.clear()
|
||||
|
||||
signals.Connections.clear()
|
||||
|
||||
#node1 = resource.create('node1', 'resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'})
|
||||
#node1 = resources_compiled.RoNodeResource('node1', None, {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'})
|
||||
node1 = resources_compiled.RoNodeResource('node1', None, {})
|
||||
node1.ip = '10.0.0.3'
|
||||
node1.ssh_key = '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key'
|
||||
node1.ssh_user = 'vagrant'
|
||||
#node2 = resource.create('node2', 'resources/ro_node/', {'ip': '10.0.0.4', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key', 'ssh_user': 'vagrant'})
|
||||
node2 = resources_compiled.RoNodeResource('node2', None, {'ip': '10.0.0.4', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key', 'ssh_user': 'vagrant'})
|
||||
|
||||
#rabbitmq_service1 = resource.create('rabbitmq_service1', 'resources/rabbitmq_service/', {'management_port': '15672', 'port': '5672', 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'})
|
||||
rabbitmq_service1 = resources_compiled.RabbitmqServiceResource('rabbitmq_service1', None, {'management_port': 15672, 'port': 5672, 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'})
|
||||
openstack_vhost = resource.create('openstack_vhost', 'resources/rabbitmq_vhost/', {'vhost_name': 'openstack'})[0]
|
||||
openstack_rabbitmq_user = resource.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', {'user_name': 'openstack', 'password': 'openstack_password'})[0]
|
||||
|
||||
node2 = vr.create('node2', 'resources/ro_node/', {'ip': '10.0.0.4', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key', 'ssh_user': 'vagrant'})[0]
|
||||
|
||||
mariadb_service1 = vr.create('mariadb_service1', 'resources/mariadb_service', {'image': 'mariadb', 'root_password': 'mariadb', 'port': 3306})[0]
|
||||
keystone_db = vr.create('keystone_db', 'resources/mariadb_keystone_db/', {'db_name': 'keystone_db', 'login_user': 'root'})[0]
|
||||
keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_user/', {'user_name': 'keystone', 'user_password': 'keystone', 'login_user': 'root'})[0]
|
||||
|
||||
keystone_config1 = vr.create('keystone_config1', GitProvider(GIT_KEYSTONE_RESOURCE_URL, path='keystone_config'), {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})[0]
|
||||
#keystone_service1 = vr.create('keystone_service1', RemoteZipProvider(ZIP_KEYSTONE_RESOURCE_URL, 'keystone_service'), {'port': 5001, 'admin_port': 35357})[0]
|
||||
keystone_service1 = vr.create('keystone_service1', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service'), {'port': 5001, 'admin_port': 35357})[0]
|
||||
|
||||
keystone_config2 = vr.create('keystone_config2', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_config'), {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})[0]
|
||||
keystone_service2 = vr.create('keystone_service2', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service'), {'port': 5002, 'admin_port': 35358})[0]
|
||||
|
||||
haproxy_keystone_config = vr.create('haproxy_keystone1_config', 'resources/haproxy_service_config/', {'name': 'keystone_config', 'listen_port': 5000, 'servers':[], 'ports':[]})[0]
|
||||
haproxy_config = vr.create('haproxy_config', 'resources/haproxy_config', {'configs_names':[], 'configs_ports':[], 'listen_ports':[], 'configs':[]})[0]
|
||||
haproxy_service = vr.create('haproxy_service', 'resources/docker_container/', {'image': 'tutum/haproxy', 'ports': [], 'host_binds': [], 'volume_binds':[]})[0]
|
||||
|
||||
glance_db = vr.create('glance_db', 'resources/mariadb_db/', {'db_name': 'glance_db', 'login_user': 'root'})[0]
|
||||
glance_db_user = vr.create('glance_db_user', 'resources/mariadb_user/', {'user_name': 'glance', 'user_password': 'glance', 'login_user': 'root'})[0]
|
||||
|
||||
services_tenant = vr.create('glance_keystone_tenant', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_tenant'), {'tenant_name': 'services'})[0]
|
||||
|
||||
glance_keystone_user = vr.create('glance_keystone_user', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_user'), {'user_name': 'glance_admin', 'user_password': 'password1234', 'tenant_name': 'service_admins'})[0]
|
||||
glance_keystone_role = vr.create('glance_keystone_role', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_role'), {'role_name': 'admin'})[0]
|
||||
|
||||
# TODO: add api_host and registry_host -- they can be different! Currently 'ip' is used.
|
||||
glance_config = vr.create('glance_config', 'resources/glance_config/', {'api_port': 9393})[0]
|
||||
glance_api_container = vr.create('glance_api_container', 'resources/glance_api_service/', {'image': 'cgenie/centos-rdo-glance-api', 'ports': [{'value': [{'value': 9393}]}], 'host_binds': [], 'volume_binds': []})[0]
|
||||
glance_registry_container = vr.create('glance_registry_container', 'resources/glance_registry_service/', {'image': 'cgenie/centos-rdo-glance-registry', 'ports': [{'value': [{'value': 9191}]}], 'host_binds': [], 'volume_binds': []})[0]
|
||||
# TODO: admin_port should be refactored, we need to rethink docker
|
||||
# container resource and make it common for all
|
||||
# resources used in this demo
|
||||
glance_api_endpoint = vr.create('glance_api_endpoint', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service_endpoint'), {'adminurl': 'http://{{ip}}:{{admin_port}}', 'internalurl': 'http://{{ip}}:{{port}}', 'publicurl': 'http://{{ip}}:{{port}}', 'description': 'OpenStack Image Service', 'type': 'image'})[0]
|
||||
# TODO: ports value 9393 is a HACK -- fix glance_api_container's port and move to some config
|
||||
# TODO: glance registry container's API port needs to point to haproxy_config
|
||||
haproxy_glance_api_config = vr.create('haproxy_glance_api_config', 'resources/haproxy_service_config/', {'name': 'glance_api_config', 'listen_port': 9292, 'servers': [], 'ports':[{'value': 9393}]})[0]
|
||||
|
||||
admin_tenant = vr.create('admin_tenant', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_tenant'), {'tenant_name': 'admin'})[0]
|
||||
admin_user = vr.create('admin_user', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_user'), {'user_name': 'admin', 'user_password': 'admin'})[0]
|
||||
admin_role = vr.create('admin_role', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_role'), {'role_name': 'admin'})[0]
|
||||
keystone_service_endpoint = vr.create('keystone_service_endpoint', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service_endpoint'), {'adminurl': 'http://{{ip}}:{{admin_port}}/v2.0', 'internalurl': 'http://{{ip}}:{{port}}/v2.0', 'publicurl': 'http://{{ip}}:{{port}}/v2.0', 'description': 'OpenStack Identity Service', 'type': 'identity'})[0]
|
||||
openrc = vr.create('openrc_file', 'resources/openrc_file', {})[0]
|
||||
|
||||
|
||||
####
|
||||
# connections
|
||||
####
|
||||
|
||||
# mariadb
|
||||
signals.connect(node1, mariadb_service1)
|
||||
|
||||
# rabbitmq
|
||||
signals.connect(node1, rabbitmq_service1)
|
||||
signals.connect(rabbitmq_service1, openstack_vhost)
|
||||
signals.connect(rabbitmq_service1, openstack_rabbitmq_user)
|
||||
signals.connect(openstack_vhost, openstack_rabbitmq_user, {'vhost_name': 'vhost_name'})
|
||||
|
||||
# keystone db
|
||||
signals.connect(node1, keystone_db)
|
||||
signals.connect(mariadb_service1, keystone_db, {'root_password': 'login_password', 'port': 'login_port'})
|
||||
|
||||
# keystone_db_user
|
||||
signals.connect(node1, keystone_db_user)
|
||||
signals.connect(mariadb_service1, keystone_db_user, {'root_password': 'login_password', 'port': 'login_port'})
|
||||
signals.connect(keystone_db, keystone_db_user, {'db_name': 'db_name'})
|
||||
|
||||
signals.connect(node1, keystone_config1)
|
||||
signals.connect(mariadb_service1, keystone_config1, {'ip': 'db_host', 'port': 'db_port'})
|
||||
signals.connect(keystone_db_user, keystone_config1, {'db_name': 'db_name', 'user_name': 'db_user', 'user_password': 'db_password'})
|
||||
|
||||
signals.connect(node1, keystone_service1)
|
||||
signals.connect(keystone_config1, keystone_service1, {'config_dir': 'config_dir'})
|
||||
|
||||
signals.connect(node2, keystone_config2)
|
||||
signals.connect(mariadb_service1, keystone_config2, {'ip': 'db_host', 'port': 'db_port'})
|
||||
signals.connect(keystone_db_user, keystone_config2, {'db_name': 'db_name', 'user_name': 'db_user', 'user_password': 'db_password'})
|
||||
|
||||
signals.connect(node2, keystone_service2)
|
||||
signals.connect(keystone_config2, keystone_service2, {'config_dir': 'config_dir'})
|
||||
|
||||
signals.connect(keystone_service1, haproxy_keystone_config, {'ip': 'servers', 'port': 'ports'})
|
||||
signals.connect(keystone_service2, haproxy_keystone_config, {'ip': 'servers', 'port': 'ports'})
|
||||
|
||||
signals.connect(node2, haproxy_config)
|
||||
signals.connect(haproxy_keystone_config, haproxy_config, {'listen_port': 'listen_ports', 'name': 'configs_names', 'ports': 'configs_ports', 'servers': 'configs'})
|
||||
|
||||
signals.connect(node2, haproxy_service)
|
||||
signals.connect(haproxy_config, haproxy_service, {'listen_ports': 'ports', 'config_dir': 'host_binds'})
|
||||
|
||||
# keystone configuration
|
||||
signals.connect(keystone_config1, admin_tenant)
|
||||
signals.connect(keystone_service1, admin_tenant, {'admin_port': 'keystone_port', 'ip': 'keystone_host'})
|
||||
signals.connect(admin_tenant, admin_user)
|
||||
signals.connect(admin_user, admin_role)
|
||||
signals.connect(keystone_config1, keystone_service_endpoint)
|
||||
signals.connect(keystone_service1, keystone_service_endpoint, {'ip': 'keystone_host','admin_port': 'admin_port', 'port': 'port'})
|
||||
signals.connect(keystone_service1, keystone_service_endpoint, {'admin_port': 'keystone_port'})
|
||||
|
||||
# glance db
|
||||
signals.connect(node1, glance_db)
|
||||
signals.connect(mariadb_service1, glance_db, {'root_password': 'login_password', 'port': 'login_port'})
|
||||
signals.connect(node1, glance_db_user)
|
||||
signals.connect(mariadb_service1, glance_db_user, {'root_password': 'login_password', 'port': 'login_port'})
|
||||
signals.connect(glance_db, glance_db_user, {'db_name': 'db_name'})
|
||||
|
||||
# glance keystone user
|
||||
signals.connect(keystone_config1, services_tenant)
|
||||
signals.connect(keystone_service1, services_tenant, {'admin_port': 'keystone_port', 'ip': 'keystone_host'})
|
||||
signals.connect(services_tenant, glance_keystone_user) # standard ip, ssh_key, ssh_user
|
||||
signals.connect(glance_keystone_user, glance_keystone_role)
|
||||
signals.connect(keystone_service1, glance_keystone_user, {'admin_port': 'keystone_port', 'ip': 'keystone_host'})
|
||||
signals.connect(keystone_config1, glance_keystone_user, {'admin_token': 'admin_token'})
|
||||
signals.connect(glance_keystone_user, glance_config, {'user_name': 'keystone_admin_user', 'user_password': 'keystone_admin_password', 'tenant_name': 'keystone_admin_tenant'})
|
||||
signals.connect(keystone_service2, glance_config, {'admin_port': 'keystone_admin_port'})
|
||||
|
||||
# glance
|
||||
signals.connect(node2, glance_config)
|
||||
signals.connect(haproxy_keystone_config, glance_config, {'listen_port': 'keystone_port'})
|
||||
signals.connect(haproxy_service, glance_config, {'ip': 'keystone_ip'})
|
||||
signals.connect(mariadb_service1, glance_config, {'ip': 'mysql_ip'})
|
||||
signals.connect(glance_db, glance_config, {'db_name': 'mysql_db'})
|
||||
signals.connect(glance_db_user, glance_config, {'user_name': 'mysql_user', 'user_password': 'mysql_password'})
|
||||
signals.connect(node2, glance_api_container)
|
||||
signals.connect(glance_config, glance_api_container, {'config_dir': 'host_binds'})
|
||||
|
||||
signals.connect(glance_db_user, glance_api_container, {'user_password': 'db_password'})
|
||||
signals.connect(glance_keystone_user, glance_api_container, {'admin_token': 'keystone_admin_token', 'user_password': 'keystone_password'})
|
||||
signals.connect(haproxy_keystone_config, glance_api_container, {'listen_port': 'keystone_port'})
|
||||
signals.connect(haproxy_config, glance_api_container, {'ip': 'keystone_host'})
|
||||
|
||||
signals.connect(node2, glance_registry_container)
|
||||
signals.connect(glance_config, glance_registry_container, {'config_dir': 'host_binds'})
|
||||
|
||||
# glance haproxy
|
||||
signals.connect(glance_api_container, haproxy_glance_api_config, {'ip': 'servers'})
|
||||
#signals.connect(glance_config, haproxy_glance_api_config, {'api_port': 'ports'})
|
||||
signals.connect(haproxy_glance_api_config, haproxy_config, {'listen_port': 'listen_ports', 'name': 'configs_names', 'ports': 'configs_ports', 'servers': 'configs'})
|
||||
|
||||
# glance keystone endpoint
|
||||
#signals.connect(glance_api_container, glance_api_endpoint, {'ip': 'ip', 'ssh_user': 'ssh_user', 'ssh_key': 'ssh_key'})
|
||||
signals.connect(haproxy_service, glance_api_endpoint, {'ip': 'ip', 'ssh_user': 'ssh_user', 'ssh_key': 'ssh_key'})
|
||||
signals.connect(keystone_config1, glance_api_endpoint, {'admin_token': 'admin_token'})
|
||||
signals.connect(keystone_service1, glance_api_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port'})
|
||||
signals.connect(haproxy_glance_api_config, glance_api_endpoint, {'listen_port': 'admin_port'})
|
||||
signals.connect(haproxy_glance_api_config, glance_api_endpoint, {'listen_port': 'port'})
|
||||
signals.connect(node1, openrc)
|
||||
signals.connect(keystone_service1, openrc, {'ip': 'keystone_host', 'admin_port':'keystone_port'})
|
||||
signals.connect(admin_user, openrc, {'user_name': 'user_name','user_password':'password', 'tenant_name': 'tenant'})
|
||||
|
||||
|
||||
errors = vr.validate_resources()
|
||||
if errors:
|
||||
for r, error in errors:
|
||||
print 'ERROR: %s: %s' % (r.name, error)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# run
|
||||
actions.resource_action(mariadb_service1, 'run')
|
||||
actions.resource_action(rabbitmq_service1, 'run')
|
||||
actions.resource_action(openstack_vhost, 'run')
|
||||
actions.resource_action(openstack_rabbitmq_user, 'run')
|
||||
actions.resource_action(keystone_db, 'run')
|
||||
actions.resource_action(keystone_db_user, 'run')
|
||||
actions.resource_action(keystone_config1, 'run')
|
||||
actions.resource_action(keystone_service1, 'run')
|
||||
actions.resource_action(keystone_config2, 'run')
|
||||
actions.resource_action(keystone_service2, 'run')
|
||||
|
||||
actions.resource_action(haproxy_config, 'run')
|
||||
actions.resource_action(haproxy_service, 'run')
|
||||
|
||||
actions.resource_action(admin_tenant, 'run')
|
||||
actions.resource_action(admin_user, 'run')
|
||||
actions.resource_action(admin_role, 'run')
|
||||
actions.resource_action(keystone_service_endpoint, 'run')
|
||||
actions.resource_action(openrc, 'run')
|
||||
|
||||
actions.resource_action(services_tenant, 'run')
|
||||
actions.resource_action(glance_keystone_user, 'run')
|
||||
actions.resource_action(glance_keystone_role, 'run')
|
||||
actions.resource_action(glance_db, 'run')
|
||||
actions.resource_action(glance_db_user, 'run')
|
||||
actions.resource_action(glance_config, 'run')
|
||||
actions.resource_action(glance_api_container, 'run')
|
||||
time.sleep(10) #TODO fix
|
||||
actions.resource_action(glance_api_endpoint, 'run')
|
||||
actions.resource_action(glance_registry_container, 'run')
|
||||
time.sleep(10)
|
||||
|
||||
# HAProxy needs to be restarted after Glance API is up
|
||||
actions.resource_action(haproxy_service, 'remove')
|
||||
actions.resource_action(haproxy_service, 'run')
|
||||
time.sleep(10)
|
||||
|
||||
# test working configuration
|
||||
requests.get('http://%s:%s' % (keystone_service1.args['ip'].value, keystone_service1.args['port'].value))
|
||||
requests.get('http://%s:%s' % (keystone_service2.args['ip'].value, keystone_service2.args['port'].value))
|
||||
requests.get('http://%s:%s' % (haproxy_service.args['ip'].value, haproxy_service.args['ports'].value[0]['value'][0]['value']))
|
||||
|
||||
token_data = requests.post(
|
||||
'http://%s:%s/v2.0/tokens' % (haproxy_service.args['ip'].value, haproxy_keystone_config.args['listen_port'].value),
|
||||
json.dumps({
|
||||
'auth': {
|
||||
'tenantName': glance_keystone_user.args['tenant_name'].value,
|
||||
'passwordCredentials': {
|
||||
'username': glance_keystone_user.args['user_name'].value,
|
||||
'password': glance_keystone_user.args['user_password'].value,
|
||||
}
|
||||
}
|
||||
}),
|
||||
headers={'Content-Type': 'application/json'}
|
||||
)
|
||||
|
||||
token = token_data.json()['access']['token']['id']
|
||||
print 'TOKEN: {}'.format(token)
|
||||
|
||||
requests.get('http://%s:%s' % (rabbitmq_service1.args['ip'].value, rabbitmq_service1.args['management_port'].value))
|
||||
|
||||
images = requests.get(
|
||||
'http://%s:%s/v1/images' % (glance_api_container.args['ip'].value, haproxy_glance_api_config.args['listen_port'].value),
|
||||
headers={'X-Auth-Token': token}
|
||||
)
|
||||
assert images.json() == {'images': []}
|
||||
images = requests.get(
|
||||
'http://%s:%s' % (glance_registry_container.args['ip'].value, glance_registry_container.args['ports'].value[0]['value'][0]['value']),
|
||||
headers={'X-Auth-Token': token}
|
||||
)
|
||||
assert images.json() == {'images': []}
|
||||
|
||||
|
||||
@click.command()
|
||||
def undeploy():
|
||||
db = get_db()
|
||||
|
||||
resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
|
||||
resources = {r.name: r for r in resources}
|
||||
|
||||
actions.resource_action(resources['glance_api_endpoint'], 'remove')
|
||||
actions.resource_action(resources['glance_api_container'], 'remove')
|
||||
actions.resource_action(resources['glance_registry_container'], 'remove')
|
||||
actions.resource_action(resources['glance_config'], 'remove')
|
||||
actions.resource_action(resources['glance_db_user'], 'remove')
|
||||
actions.resource_action(resources['glance_db'], 'remove')
|
||||
actions.resource_action(resources['glance_keystone_role'], 'remove')
|
||||
actions.resource_action(resources['glance_keystone_user'], 'remove')
|
||||
actions.resource_action(resources['glance_keystone_tenant'], 'remove')
|
||||
|
||||
actions.resource_action(resources['openrc_file'], 'remove')
|
||||
actions.resource_action(resources['keystone_service_endpoint'], 'remove')
|
||||
actions.resource_action(resources['admin_role'], 'remove')
|
||||
actions.resource_action(resources['admin_user'], 'remove')
|
||||
actions.resource_action(resources['admin_tenant'], 'remove')
|
||||
|
||||
actions.resource_action(resources['haproxy_service'], 'remove')
|
||||
actions.resource_action(resources['haproxy_config'], 'remove')
|
||||
actions.resource_action(resources['keystone_service2'], 'remove')
|
||||
actions.resource_action(resources['keystone_config2'], 'remove')
|
||||
actions.resource_action(resources['keystone_service1'], 'remove')
|
||||
actions.resource_action(resources['keystone_config1'], 'remove')
|
||||
actions.resource_action(resources['keystone_db_user'], 'remove')
|
||||
actions.resource_action(resources['keystone_db'], 'remove')
|
||||
actions.resource_action(resources['mariadb_service1'], 'remove')
|
||||
actions.resource_action(resources['openstack_rabbitmq_user'], 'remove')
|
||||
actions.resource_action(resources['openstack_vhost'], 'remove')
|
||||
actions.resource_action(resources['rabbitmq_service1'], 'remove')
|
||||
|
||||
db.clear()
|
||||
|
||||
signals.Connections.clear()
|
||||
|
||||
|
||||
main.add_command(deploy)
|
||||
main.add_command(undeploy)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@@ -6,7 +6,7 @@ from solar.core import actions
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core import virtual_resource as vr
from solar.core.resource import virtual_resource as vr

from solar.interfaces.db import get_db
@@ -17,10 +17,12 @@ GIT_PUPPET_LIBS_URL = 'https://github.com/CGenie/puppet-libs-resource'
# TODO
# Resource for repository OR puppet apt-module in run.pp
# add-apt-repository cloud-archive:juno
# No copy of manifests, pull from upstream
# Official puppet manifests, not fuel-library
# To discuss: install stuff in Docker container

# NOTE
# No copy of manifests, pull from upstream (implemented in the puppet handler)
# Official puppet manifests, not fuel-library


@click.group()
def main():
@ -36,65 +38,45 @@ def deploy():
|
||||
|
||||
node1 = vr.create('node1', 'resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'})[0]
|
||||
|
||||
rabbitmq_service1 = vr.create('rabbitmq_service1', 'resources/rabbitmq_service/', {'management_port': 15672, 'port': 5672, 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'})[0]
|
||||
# MARIADB
|
||||
mariadb_service1 = vr.create('mariadb_service1', 'resources/mariadb_service', {'image': 'mariadb', 'root_password': 'mariadb', 'port': 3306})[0]
|
||||
|
||||
signals.connect(node1, mariadb_service1)
|
||||
|
||||
# RABBIT
|
||||
rabbitmq_service1 = vr.create('rabbitmq1', 'resources/rabbitmq_service', {'management_port': 15672, 'port': 5672, 'node_name': 'rabbitmq_service1'})[0]
|
||||
openstack_vhost = vr.create('openstack_vhost', 'resources/rabbitmq_vhost/', {'vhost_name': 'openstack'})[0]
|
||||
openstack_rabbitmq_user = vr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', {'user_name': 'openstack', 'password': 'openstack_password'})[0]
|
||||
|
||||
mariadb_service1 = vr.create('mariadb_service1', 'resources/mariadb_service', {'image': 'mariadb', 'root_password': 'mariadb', 'port': 3306})[0]
|
||||
keystone_db = vr.create('keystone_db', 'resources/mariadb_keystone_db/', {'db_name': 'keystone_db', 'login_user': 'root'})[0]
|
||||
keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_keystone_user/', {'new_user_name': 'keystone', 'new_user_password': 'keystone', 'login_user': 'root'})[0]
|
||||
|
||||
#keystone_puppet = vr.create('keystone_puppet', GitProvider(GIT_PUPPET_LIBS_URL, path='keystone'), {})[0]
|
||||
keystone_puppet = vr.create('keystone_puppet', 'resources/keystone_puppet', {})[0]
|
||||
|
||||
# # TODO: vhost cannot be specified in neutron Puppet manifests so this user has to be admin anyways
|
||||
# neutron_puppet = vr.create('neutron_puppet', GitProvider(GIT_PUPPET_LIBS_URL, path='neutron'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
|
||||
admin_tenant = vr.create('admin_tenant', 'resources/keystone_tenant', {'tenant_name': 'admin'})[0]
|
||||
admin_user = vr.create('admin_user', 'resources/keystone_user', {'user_name': 'admin', 'user_password': 'admin'})[0]
|
||||
admin_role = vr.create('admin_role', 'resources/keystone_role', {'role_name': 'admin'})[0]
|
||||
|
||||
services_tenant = vr.create('services_tenant', 'resources/keystone_tenant', {'tenant_name': 'services'})[0]
|
||||
neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', {'user_name': 'neutron', 'user_password': 'neutron'})[0]
|
||||
neutron_keystone_role = vr.create('neutron_keystone_role', 'resources/keystone_role', {'role_name': 'neutron'})[0]
|
||||
|
||||
neutron_keystone_service_endpoint = vr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', {'adminurl': 'http://{{admin_ip}}:{{admin_port}}', 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', 'publicurl': 'http://{{public_ip}}:{{public_port}}', 'description': 'OpenStack Network Service', 'type': 'network'})[0]
|
||||
|
||||
# #cinder_puppet = vr.create('cinder_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'cinder'), {})[0]
|
||||
# cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
|
||||
|
||||
# cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {'user_name': 'cinder', 'user_password': 'cinder'})[0]
|
||||
# cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {'role_name': 'cinder'})[0]
|
||||
|
||||
# #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
# # TODO: fix rabbitmq user/password
|
||||
# nova_network_puppet = vr.create('nova_network_puppet', 'resources/nova_network_puppet', {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
|
||||
# nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', {'user_name': 'nova', 'user_password': 'nova'})[0]
|
||||
# nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', {'role_name': 'nova'})[0]
|
||||
|
||||
# TODO: 'services' tenant-id is hardcoded
|
||||
#nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {'adminurl': 'http://{{ip}}:{{admin_port}}/v2/services', 'internalurl': 'http://{{ip}}:{{public_port}}/v2/services', 'publicurl': 'http://{{ip}}:{{port}}/v2/services', 'description': 'OpenStack Compute Service', 'type': 'compute', 'port': 8776, 'admin_port': 8776})[0]
|
||||
|
||||
|
||||
signals.connect(node1, rabbitmq_service1)
|
||||
signals.connect(rabbitmq_service1, openstack_vhost)
|
||||
signals.connect(rabbitmq_service1, openstack_rabbitmq_user)
|
||||
signals.connect(openstack_vhost, openstack_rabbitmq_user, {'vhost_name': 'vhost_name'})
|
||||
signals.connect(rabbitmq_service1, neutron_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
|
||||
# signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbitmq_vhost'})
|
||||
# signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbitmq_user', 'password': 'rabbitmq_password'})
|
||||
# signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
|
||||
# signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
|
||||
|
||||
signals.connect(node1, mariadb_service1)
|
||||
# KEYSTONE
|
||||
keystone_puppet = vr.create('keystone_puppet', 'resources/keystone_puppet', {})[0]
|
||||
keystone_db = vr.create('keystone_db', 'resources/mariadb_keystone_db/', {'db_name': 'keystone_db', 'login_user': 'root'})[0]
|
||||
keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_keystone_user/', {'new_user_name': 'keystone', 'new_user_password': 'keystone', 'login_user': 'root'})[0]
|
||||
keystone_service_endpoint = vr.create('keystone_service_endpoint', 'resources/keystone_service_endpoint', {'endpoint_name': 'keystone', 'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2.0', 'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2.0', 'publicurl': 'http://{{public_ip}}:{{public_port}}/v2.0', 'description': 'OpenStack Identity Service', 'type': 'identity'})[0]
|
||||
|
||||
admin_tenant = vr.create('admin_tenant', 'resources/keystone_tenant', {'tenant_name': 'admin'})[0]
|
||||
admin_user = vr.create('admin_user', 'resources/keystone_user', {'user_name': 'admin', 'user_password': 'admin'})[0]
|
||||
admin_role = vr.create('admin_role', 'resources/keystone_role', {'role_name': 'admin'})[0]
|
||||
services_tenant = vr.create('services_tenant', 'resources/keystone_tenant', {'tenant_name': 'services'})[0]
|
||||
|
||||
signals.connect(node1, keystone_db)
|
||||
signals.connect(node1, keystone_db_user)
|
||||
signals.connect(node1, keystone_puppet)
|
||||
signals.connect(mariadb_service1, keystone_db, {'port': 'login_port', 'root_password': 'login_password'})
|
||||
signals.connect(mariadb_service1, keystone_db_user, {'port': 'login_port', 'root_password': 'login_password'})
|
||||
signals.connect(keystone_db, keystone_db_user, {'db_name': 'db_name'})
|
||||
|
||||
signals.connect(node1, keystone_service_endpoint)
|
||||
signals.connect(keystone_puppet, keystone_service_endpoint, {'admin_token': 'admin_token', 'admin_port': 'keystone_admin_port', 'ip': 'keystone_host'})
|
||||
signals.connect(keystone_puppet, keystone_service_endpoint, {'admin_port': 'admin_port', 'ip': 'admin_ip'})
|
||||
signals.connect(keystone_puppet, keystone_service_endpoint, {'port': 'internal_port', 'ip': 'internal_ip'})
|
||||
signals.connect(keystone_puppet, keystone_service_endpoint, {'port': 'public_port', 'ip': 'public_ip'})
|
||||
|
||||
signals.connect(keystone_puppet, admin_tenant)
|
||||
signals.connect(keystone_puppet, admin_tenant, {'admin_port': 'keystone_port', 'ip': 'keystone_host'})
|
||||
signals.connect(admin_tenant, admin_user)
|
||||
@ -102,34 +84,82 @@ def deploy():
|
||||
|
||||
signals.connect(keystone_puppet, services_tenant)
|
||||
signals.connect(keystone_puppet, services_tenant, {'admin_port': 'keystone_port', 'ip': 'keystone_host'})
|
||||
signals.connect(services_tenant, neutron_keystone_user)
|
||||
signals.connect(neutron_keystone_user, neutron_keystone_role)
|
||||
|
||||
signals.connect(node1, keystone_puppet)
|
||||
signals.connect(keystone_db, keystone_puppet, {'db_name': 'db_name'})
|
||||
signals.connect(keystone_db_user, keystone_puppet, {'new_user_name': 'db_user', 'new_user_password': 'db_password'})
|
||||
|
||||
# OPENRC
|
||||
openrc = vr.create('openrc_file', 'resources/openrc_file', {})[0]
|
||||
|
||||
signals.connect(node1, openrc)
|
||||
signals.connect(keystone_puppet, openrc, {'ip': 'keystone_host', 'admin_port':'keystone_port'})
|
||||
signals.connect(admin_user, openrc, {'user_name': 'user_name','user_password':'password', 'tenant_name': 'tenant'})
|
||||
|
||||
# NEUTRON
|
||||
# TODO: vhost cannot be specified in neutron Puppet manifests so this user has to be admin anyways
|
||||
neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
|
||||
neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', {'user_name': 'neutron', 'user_password': 'neutron'})[0]
|
||||
neutron_keystone_role = vr.create('neutron_keystone_role', 'resources/keystone_role', {'role_name': 'neutron'})[0]
|
||||
neutron_keystone_service_endpoint = vr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', {'endpoint_name': 'neutron', 'adminurl': 'http://{{admin_ip}}:{{admin_port}}', 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', 'publicurl': 'http://{{public_ip}}:{{public_port}}', 'description': 'OpenStack Network Service', 'type': 'network'})[0]
|
||||
|
||||
signals.connect(node1, neutron_puppet)
|
||||
signals.connect(rabbitmq_service1, neutron_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
|
||||
signals.connect(admin_user, neutron_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'})
|
||||
signals.connect(keystone_puppet, neutron_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})
|
||||
|
||||
signals.connect(neutron_puppet, neutron_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'})
|
||||
signals.connect(services_tenant, neutron_keystone_user)
|
||||
signals.connect(neutron_keystone_user, neutron_keystone_role)
|
||||
signals.connect(keystone_puppet, neutron_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'})
|
||||
signals.connect(neutron_puppet, neutron_keystone_service_endpoint, {'ip': 'admin_ip', 'port': 'admin_port'})
|
||||
signals.connect(neutron_puppet, neutron_keystone_service_endpoint, {'ip': 'internal_ip', 'port': 'internal_port'})
|
||||
signals.connect(neutron_puppet, neutron_keystone_service_endpoint, {'ip': 'public_ip', 'port': 'public_port'})
|
||||
signals.connect(keystone_puppet, neutron_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})
|
||||
|
||||
# CINDER
|
||||
# # CINDER
|
||||
# cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {
|
||||
# 'rabbit_userid': 'guest', 'rabbit_password': 'guest'})[0]
|
||||
# cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
|
||||
# 'db_name': 'cinder_db', 'login_user': 'root'})[0]
|
||||
# cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', {
|
||||
# 'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
|
||||
# cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {
|
||||
# 'user_name': 'cinder', 'user_password': 'cinder'})[0]
|
||||
# cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {
|
||||
# 'role_name': 'cinder'})[0]
|
||||
# cinder_keystone_service_endpoint = vr.create(
|
||||
# 'cinder_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
|
||||
# 'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
|
||||
# 'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
|
||||
# 'publicurl': 'http://{{public_ip}}:{{public_port}}',
|
||||
# 'description': 'OpenStack Network Service', 'type': 'network'})[0]
|
||||
|
||||
|
||||
# signals.connect(node1, cinder_db)
|
||||
# signals.connect(node1, cinder_db_user)
|
||||
# signals.connect(node1, cinder_puppet)
|
||||
# signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})
|
||||
# signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
|
||||
# signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
|
||||
# signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
|
||||
# signals.connect(mariadb_service1, cinder_db, {
|
||||
# 'port': 'login_port', 'root_password': 'login_password'})
|
||||
# signals.connect(mariadb_service1, cinder_db_user, {
|
||||
# 'port': 'login_port', 'root_password': 'login_password'})
|
||||
# signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'})
|
||||
|
||||
# signals.connect(services_tenant, cinder_keystone_user)
|
||||
# signals.connect(cinder_keystone_user, cinder_keystone_role)
|
||||
|
||||
# signals.connect(cinder_keystone_user, cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'})
|
||||
|
||||
# NOVA
|
||||
# #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
# # TODO: fix rabbitmq user/password
|
||||
# nova_network_puppet = vr.create('nova_network_puppet', 'resources/nova_network_puppet', {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
|
||||
|
||||
# nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', {'user_name': 'nova', 'user_password': 'nova'})[0]
|
||||
# nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', {'role_name': 'nova'})[0]
|
||||
|
||||
# TODO: 'services' tenant-id is hardcoded
|
||||
# nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {'adminurl': 'http://{{ip}}:{{admin_port}}/v2/services', 'internalurl': 'http://{{ip}}:{{public_port}}/v2/services', 'publicurl': 'http://{{ip}}:{{port}}/v2/services', 'description': 'OpenStack Compute Service', 'type': 'compute', 'port': 8776, 'admin_port': 8776})[0]
|
||||
|
||||
# signals.connect(node1, nova_network_puppet)
|
||||
|
||||
# signals.connect(services_tenant, nova_keystone_user)
|
||||
@ -138,8 +168,9 @@ def deploy():
|
||||
# signals.connect(nova_keystone_user, nova_network_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'})
|
||||
# signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})
|
||||
|
||||
#signals.connect(nova_network_puppet, nova_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'})
|
||||
#signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'})
|
||||
# signals.connect(nova_network_puppet, nova_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'})
|
||||
# signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'})
|
||||
# signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
|
||||
|
||||
|
||||
has_errors = False
|
||||
@ -167,11 +198,14 @@ def deploy():
|
||||
actions.resource_action(keystone_db, 'run')
|
||||
actions.resource_action(keystone_db_user, 'run')
|
||||
actions.resource_action(keystone_puppet, 'run')
|
||||
actions.resource_action(openrc, 'run')
|
||||
|
||||
actions.resource_action(admin_tenant, 'run')
|
||||
actions.resource_action(admin_user, 'run')
|
||||
actions.resource_action(admin_role, 'run')
|
||||
|
||||
actions.resource_action(keystone_service_endpoint, 'run')
|
||||
|
||||
actions.resource_action(services_tenant, 'run')
|
||||
actions.resource_action(neutron_keystone_user, 'run')
|
||||
actions.resource_action(neutron_keystone_role, 'run')
|
||||
@ -179,6 +213,8 @@ def deploy():
|
||||
actions.resource_action(neutron_puppet, 'run')
|
||||
actions.resource_action(neutron_keystone_service_endpoint, 'run')
|
||||
|
||||
# actions.resource_action(cinder_db, 'run')
|
||||
# actions.resource_action(cinder_db_user, 'run')
|
||||
# actions.resource_action(cinder_keystone_user, 'run')
|
||||
# actions.resource_action(cinder_keystone_role, 'run')
|
||||
|
||||
@ -197,40 +233,62 @@ def deploy():
|
||||
def undeploy():
|
||||
db = get_db()
|
||||
|
||||
to_remove = [
|
||||
'neutron_keystone_service_endpoint',
|
||||
'neutron_puppet',
|
||||
'neutron_keystone_role',
|
||||
'neutron_keystone_user',
|
||||
'services_tenant',
|
||||
#'keystone_service_endpoint',
|
||||
'admin_role',
|
||||
'admin_user',
|
||||
'admin_tenant',
|
||||
'openrc_file',
|
||||
'keystone_puppet',
|
||||
'keystone_db_user',
|
||||
'keystone_db',
|
||||
'mariadb_service1',
|
||||
'openstack_rabbitmq_user',
|
||||
'openstack_vhost',
|
||||
'rabbitmq1',
|
||||
]
|
||||
|
||||
resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
|
||||
resources = {r.name: r for r in resources}
|
||||
|
||||
for name in to_remove:
|
||||
actions.resource_action(resources[name], 'remove')
|
||||
|
||||
#actions.resource_action(resources['nova_keystone_service_endpoint'], 'remove' )
|
||||
# actions.resource_action(resources['nova_network_puppet'], 'remove' )
|
||||
|
||||
# actions.resource_action(resources['nova_keystone_role'], 'remove')
|
||||
# actions.resource_action(resources['nova_keystone_user'], 'remove')
|
||||
|
||||
# actions.resource_action(resources['neutron_keystone_service_endpoint'], 'remove' )
|
||||
# actions.resource_action(resources['neutron_puppet'], 'remove' )
|
||||
|
||||
# actions.resource_action(resources['cinder_puppet'], 'remove' )
|
||||
|
||||
actions.resource_action(resources['neutron_keystone_service_endpoint'], 'remove' )
|
||||
actions.resource_action(resources['neutron_puppet'], 'remove' )
|
||||
|
||||
# actions.resource_action(resources['cinder_keystone_role'], 'remove')
|
||||
# actions.resource_action(resources['cinder_keystone_user'], 'remove')
|
||||
|
||||
actions.resource_action(resources['neutron_keystone_role'], 'remove')
|
||||
actions.resource_action(resources['neutron_keystone_user'], 'remove')
|
||||
actions.resource_action(resources['services_tenant'], 'remove')
|
||||
# actions.resource_action(resources['neutron_keystone_role'], 'remove')
|
||||
# actions.resource_action(resources['neutron_keystone_user'], 'remove')
|
||||
# actions.resource_action(resources['services_tenant'], 'remove')
|
||||
|
||||
actions.resource_action(resources['admin_role'], 'remove')
|
||||
actions.resource_action(resources['admin_user'], 'remove')
|
||||
actions.resource_action(resources['admin_tenant'], 'remove')
|
||||
# actions.resource_action(resources['admin_role'], 'remove')
|
||||
# actions.resource_action(resources['admin_user'], 'remove')
|
||||
# actions.resource_action(resources['admin_tenant'], 'remove')
|
||||
|
||||
actions.resource_action(resources['keystone_puppet'], 'remove')
|
||||
actions.resource_action(resources['keystone_db_user'], 'remove')
|
||||
actions.resource_action(resources['keystone_db'], 'remove')
|
||||
# actions.resource_action(resources['keystone_puppet'], 'remove')
|
||||
# actions.resource_action(resources['keystone_db_user'], 'remove')
|
||||
# actions.resource_action(resources['keystone_db'], 'remove')
|
||||
|
||||
actions.resource_action(resources['mariadb_service1'], 'remove')
|
||||
# actions.resource_action(resources['mariadb_service1'], 'remove')
|
||||
|
||||
actions.resource_action(resources['openstack_rabbitmq_user'], 'remove')
|
||||
actions.resource_action(resources['openstack_vhost'], 'remove')
|
||||
actions.resource_action(resources['rabbitmq_service1'], 'remove')
|
||||
# actions.resource_action(resources['openstack_rabbitmq_user'], 'remove')
|
||||
# actions.resource_action(resources['openstack_vhost'], 'remove')
|
||||
# actions.resource_action(resources['rabbitmq1'], 'remove')
|
||||
|
||||
db.clear()
|
||||
|
||||
|
13 main.yml
@@ -34,14 +34,19 @@
- shell: gem build hiera-redis.gemspec && gem install hiera-redis-3.0.0.gem chdir=/root/hiera-redis
- template: src=/vagrant/hiera.yaml dest=/etc/puppet/hiera.yaml
- file: path=/etc/puppet/hieradata state=directory
# Make paths puppet 4 compatible
- file: path=/etc/puppetlabs/code/ state=directory
- file: src=/etc/puppet/hiera.yaml dest=/etc/puppetlabs/code/hiera.yaml state=link
- apt: name=ruby-dev state=present
- shell: gem install librarian-puppet
- file: path=/tmp/puppet-modules/modules state=directory
- file: path=/tmp/puppet-modules/Puppetfile state=touch
- file: path=/var/tmp/puppet/modules state=directory owner=puppet
- file: path=/var/tmp/puppet/Puppetfile state=touch owner=puppet
- file: path=/etc/puppet/modules state=absent
- file: src=/tmp/puppet-modules/modules dest=/etc/puppet/modules state=link
- file: path=/etc/puppetlabs/code/modules state=absent
- file: path=/etc/puppetlabs/code/hieradata state=absent
- file: src=/var/tmp/puppet/modules dest=/etc/puppet/modules state=link
- file: src=/var/tmp/puppet/modules dest=/etc/puppetlabs/code/modules state=link
- file: src=/etc/puppet/hieradata dest=/etc/puppetlabs/code/hieradata state=link
- shell: gem install librarian-puppet --no-ri --no-rdoc

# Setup additional development tools
- apt: name=vim state=present
11 resources/haproxy_config/README.md (new file)
@@ -0,0 +1,11 @@
# `haproxy_config` resource

This resource represents configuration for the `haproxy_service` resource.
Each service load-balanced by HAProxy is connected to this resource via a
`haproxy_service_config` resource. This is because HAProxy has no support for
something like an `/etc/haproxy/conf.d` directory where each config lives in a
separate file; instead, all configuration must be collected in one file.

So this resource renders that single file from the data provided by the
individual `haproxy_service_config` resources.
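To make the aggregation concrete, here is a minimal sketch (not the template actually shipped with this resource; the names and config layout are assumptions) of how per-service data collected from `haproxy_service_config` resources could be rendered into one HAProxy config file:

```python
# Sketch only: aggregate per-service configs into a single haproxy.cfg-style file,
# mirroring how haproxy_config collects the connected haproxy_service_config inputs.
from jinja2 import Template

HAPROXY_CFG = Template("""\
{% for cfg in configs %}
listen {{ cfg.name }}
    bind *:{{ cfg.listen_port }}
{% for server, port in cfg.servers %}
    server srv{{ loop.index }} {{ server }}:{{ port }} check
{% endfor %}
{% endfor %}
""")

# Hypothetical collected data; in Solar these values arrive via connections.
configs = [
    {'name': 'keystone_config', 'listen_port': 5000,
     'servers': [('10.0.0.3', 5001), ('10.0.0.4', 5002)]},
]

print(HAPROXY_CFG.render(configs=configs))
```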
8 resources/haproxy_service/README.md (new file)
@@ -0,0 +1,8 @@
# `haproxy_service` resource

This resource sets up a Docker container with the HAProxy code. It requires
config to be provided by the `haproxy_config` resource (mounted under
`/etc/haproxy`).

For the container philosophy, see the `README.md` file in the `keystone_service`
resource.
5 resources/haproxy_service_config/README.md (new file)
@@ -0,0 +1,5 @@
# `haproxy_service_config` resource

This resource represents the config for a single service handled by HAProxy.
It connects into `haproxy_config`, which collects all services that are to
be load-balanced by HAProxy.
6 resources/keystone_puppet/README.md (new file)
@@ -0,0 +1,6 @@
# `keystone_puppet` resource

This resource implements inputs for the official OpenStack Keystone Puppet manifests
from https://github.com/openstack/puppet-keystone (`stable/juno` branch).

Basic tests are present that check HTTP connectivity to the service.
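For reference, a Solar resource test is just a `test(resource)` function (compare the `rabbitmq_service` test removed elsewhere in this diff); a minimal sketch of such an HTTP connectivity check, assuming the resource exposes `ip` and `port` inputs, could look like:

```python
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing keystone_puppet')

    # Sketch: fail if the Keystone HTTP endpoint is unreachable or returns an error.
    requests.get(
        'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    ).raise_for_status()
```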
@@ -10,7 +10,7 @@ $port = $resource['input']['port']['value']

class {'keystone':
  package_ensure => 'present',
  verbose => True,
  verbose => true,
  catalog_type => 'sql',
  admin_token => $admin_token,
  database_connection => "mysql://$db_user:$db_password@$ip/$db_name",

@@ -1,5 +1,5 @@
class {'keystone':
  verbose => True,
  verbose => true,
  catalog_type => 'sql',
  admin_token => '{{ admin_token }}',
  sql_connection => 'mysql://{{ db_user }}:{{ db_password }}@{{ ip }}/{{ db_name }}',
@@ -25,7 +25,7 @@ input:

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-keystone', branch: 'stable/juno'}
    value: {repository: 'https://github.com/openstack/puppet-keystone', branch: '5.1.0'}

#  forge:
#    schema: str!
11 resources/keystone_service/README.md (new file)
@@ -0,0 +1,11 @@
# `keystone_service` resource

This resource sets up a Docker container with the Keystone code. It requires
config to be provided by the `keystone_config` resource (mounted under
`/etc/keystone`).

Basically, the philosophy behind containers in Solar is to have stateless
containers with the service code and to mount stateful resources (config,
volumes, etc.) into that container. Upgrading the code is then just a matter
of replacing the stateless container with a new one and remounting the state
into it.
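As an illustration only (Solar drives this through resource actions, not through this code), the same idea expressed with the `docker` Python SDK: the container stays stateless, the config directory is a host mount, and an upgrade is remove-and-run with the same mount:

```python
import docker

client = docker.from_env()

# Assumed paths and names for illustration; the real values come from the resources.
CONFIG_MOUNT = {'/etc/solar/keystone': {'bind': '/etc/keystone', 'mode': 'rw'}}


def run_keystone(image):
    # Stateless container; all state lives in the mounted config directory.
    return client.containers.run(image, name='keystone_service1',
                                 volumes=CONFIG_MOUNT, detach=True)


def upgrade_keystone(new_image):
    old = client.containers.get('keystone_service1')
    old.remove(force=True)           # drop the stateless container
    return run_keystone(new_image)   # remount the same state into the new one
```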
@@ -4,7 +4,7 @@
  - name: keystone service and endpoint
    keystone_service:
      token: {{admin_token}}
      name: {{resource_name}}
      name: {{endpoint_name}}
      type: {{type}}
      description: {{description}}
      publicurl: {{publicurl}}
@@ -12,6 +12,9 @@ input:
    schema: str!
    value:

  endpoint_name:
    schema: str!
    value:
  type:
    schema: str!
    value:
12 resources/rabbitmq_service/actions/remove.pp (new file)
@@ -0,0 +1,12 @@
$resource = hiera($::resource_name)

$node_name = $resource['input']['node_name']['value']

class { '::rabbitmq':
  package_ensure => 'absent',
  environment_variables => {
    'RABBITMQ_NODENAME' => $node_name,
    'RABBITMQ_SERVICENAME' => 'RabbitMQ'
  }
}
@@ -1,8 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: rabbitmq container
      docker:
        name: {{ container_name }}
        image: {{ image }}
        state: absent
16 resources/rabbitmq_service/actions/run.pp (new file)
@@ -0,0 +1,16 @@
$resource = hiera($::resource_name)

$port = "${resource['input']['port']['value']}"
$management_port = "${resource['input']['management_port']['value']}"
$node_name = $resource['input']['node_name']['value']

class { '::rabbitmq':
  service_manage => false,
  port => $port,
  management_port => $management_port,
  delete_guest_user => true,
  environment_variables => {
    'RABBITMQ_NODENAME' => $node_name,
    'RABBITMQ_SERVICENAME' => 'RabbitMQ'
  }
}
@@ -1,22 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: rabbitmq container
      docker:
        command: /bin/bash -c rabbitmq-server
        name: {{ container_name }}
        image: {{ image }}
        state: running
        expose:
          - 5672
          - 15672
        ports:
          - {{ port }}:5672
          - {{ management_port }}:15672
        env:
          RABBITMQ_NODENAME: {{container_name}}
    - shell: docker exec -t {{ container_name }} rabbitmqctl list_users
      register: result
      until: result.rc == 0
      retries: 20
      delay: 0.5
@@ -1,27 +1,29 @@
id: rabbitmq_service
handler: ansible
version: 1.0.0
handler: puppet
id: 'rabbitmq'
input:
  image:
    schema: str!
    value: rabbitmq:3-management
  container_name:
    schema: str!
    value: rabbitmq_service
  management_port:
    schema: int!
    value: 15672
  port:
    schema: int!
    value: 5672
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  ip:
    schema: str!
    value: ''
  ssh_key:
    schema: str!
    value: ''
  ssh_user:
    schema: str!
    value: ''

tags: [resources/rabbitmq, resource/rabbitmq_service]
  node_name:
    schema: str!
    value: 'node1'
  port:
    schema: int!
    value: ''
  management_port:
    schema: int!
    value: ''
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git', branch: '5.1.0'}

puppet_module: 'rabbitmq'
tags: []
version: 1.0.0
@@ -1 +0,0 @@
{{erlang_cookie}}
@@ -1,9 +0,0 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing rabbitmq_service')

    requests.get('http://%s:%s' % (resource.args['ip'].value, resource.args['management_port'].value))
@@ -4,5 +4,4 @@
     - new_rabbitmq_user: user={{user_name}}
                          vhost={{vhost_name}}
                          state=absent
-                         node={{container_name}}
-                         rabbitmqctl="docker exec -it {{container_name}} rabbitmqctl"
+                         node={{node_name}}
@@ -8,5 +8,4 @@
                          read_priv=.*
                          write_priv=.*
                          state=present
-                         node={{container_name}}
-                         rabbitmqctl="docker exec -it {{container_name}} rabbitmqctl"
+                         node={{node_name}}
@@ -2,7 +2,7 @@ id: rabbitmq_user
 handler: ansible
 version: 1.0.0
 input:
-  container_name:
+  node_name:
     schema: str!
     value:
   user_name:
@@ -3,5 +3,4 @@
   tasks:
     - new_rabbitmq_vhost: name={{vhost_name}}
                           state=absent
-                          rabbitmqctl="docker exec -it {{container_name}} rabbitmqctl"
-                          node={{container_name}}
+                          node={{node_name}}
@@ -2,6 +2,5 @@
   sudo: yes
   tasks:
     - new_rabbitmq_vhost: name={{vhost_name}}
-                          node={{container_name}}
+                          node={{node_name}}
                           state=present
-                          rabbitmqctl="docker exec -it {{container_name}} rabbitmqctl"
@@ -2,7 +2,7 @@ id: rabbitmq_vhost
 handler: ansible
 version: 1.0.0
 input:
-  container_name:
+  node_name:
     schema: str!
     value:
   vhost_name:
@@ -12,5 +12,6 @@ enum34==1.0.4
 redis==2.10.3
 pytest
 fakeredis
+inflection
 Fabric==1.10.2
 tabulate==0.7.5
@@ -37,7 +37,7 @@ from solar.core.resource import assign_resources_to_nodes
 from solar.core import signals
 from solar.core.tags_set_parser import Expression
 from solar.core import testing
-from solar.core import virtual_resource as vr
+from solar.core.resource import virtual_resource as vr
 from solar.interfaces.db import get_db
 
 from solar.cli.orch import orchestration
@@ -326,7 +326,7 @@ def init_cli_connections():
         end_with=end_with)
 
     nx.write_dot(g, 'graph.dot')
-    fabric_api.local('dot', '-Tpng', 'graph.dot', '-o', 'graph.png')
+    fabric_api.local('dot -Tpng graph.dot -o graph.png')
 
     # Matplotlib
     #pos = nx.spring_layout(g)
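The replaced call passed the `dot` arguments as separate positional parameters, which Fabric 1.x's `local()` does not accept: it takes the whole shell command as a single string. In isolation, the fixed invocation is simply:

```
from fabric import api as fabric_api

# fabric.api.local() runs one shell command string; extra positional arguments
# are not joined into the command, so the call is collapsed into a single string.
fabric_api.local('dot -Tpng graph.dot -o graph.png')
```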
@@ -373,6 +373,21 @@ def init_cli_resource():
             str(key)
         ))
 
+    @resource.command()
+    def compile_all():
+        from solar.core.resource import compiler
+
+        destination_path = utils.read_config()['resources-compiled-file']
+
+        if os.path.exists(destination_path):
+            os.remove(destination_path)
+
+        for path in utils.find_by_mask(utils.read_config()['resources-files-mask']):
+            meta = utils.yaml_load(path)
+            meta['base_path'] = os.path.dirname(path)
+
+            compiler.compile(meta)
+
     @resource.command()
     def clear_all():
         click.echo('Clearing all resources')
@@ -380,7 +395,7 @@ def init_cli_resource():
 
     @resource.command()
     @click.argument('name')
-    @click.argument('base_path', type=click.Path(exists=True, file_okay=False))
+    @click.argument('base_path', type=click.Path(exists=True, file_okay=True))
     @click.argument('args', nargs=-1)
     def create(args, base_path, name):
         args_parsed = {}
@@ -98,7 +98,7 @@ class LibrarianPuppet(ResourceSSHMixin):
 
         puppetlabs = self._ssh_command(
             self.resource,
-            'sudo', 'cat', '/tmp/puppet-modules/Puppetfile'
+            'sudo', 'cat', '/var/tmp/puppet/Puppetfile'
         )
         log.debug('Puppetlabs file is: \n%s\n', puppetlabs)
 
@@ -135,14 +135,14 @@ class LibrarianPuppet(ResourceSSHMixin):
         self._scp_command(
             self.resource,
             '/tmp/Puppetfile',
-            '/tmp/puppet-modules/Puppetfile',
+            '/var/tmp/puppet/Puppetfile',
             use_sudo=True
         )
 
         self._ssh_command(
             self.resource,
             'sudo', 'librarian-puppet', 'install',
-            cwd='/tmp/puppet-modules'
+            cwd='/var/tmp/puppet'
         )
 
solar/solar/core/resource/__init__.py (new file, 20 lines)
@@ -0,0 +1,20 @@
__all__ = [
    'Resource',
    'assign_resources_to_nodes',
    'connect_resources',
    'create',
    'load',
    'load_all',
    'prepare_meta',
    'wrap_resource',
]


from solar.core.resource.resource import Resource
from solar.core.resource.resource import assign_resources_to_nodes
from solar.core.resource.resource import connect_resources
from solar.core.resource.resource import load
from solar.core.resource.resource import load_all
from solar.core.resource.resource import wrap_resource
from solar.core.resource.virtual_resource import create
from solar.core.resource.virtual_resource import prepare_meta
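The new package simply re-exports the public names, so callers (as in the cli.py change above) import them from `solar.core.resource` directly, for example:

```
from solar.core.resource import Resource, load_all
from solar.core.resource import virtual_resource as vr
```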
solar/solar/core/resource/compiler.py (new file, 66 lines)
@@ -0,0 +1,66 @@
import inflection
import os
import pprint

from solar.core import resource
from solar import utils


RESOURCE_HEADER_TEMPLATE = """
from solar.core.resource import Resource
"""


RESOURCE_CLASS_TEMPLATE = """


class {class_name}(Resource):
    _metadata = {{
        'actions': {meta_actions},
        'actions_path': '{actions_path}',
        'base_path': '{base_path}',
        'input': {meta_input},
        'handler': '{handler}',
    }}

{input_properties}
"""


RESOURCE_INPUT_PROPERTY_TEMPLATE = """
    @property
    def {name}(self):
        return self.args['{name}']

    @{name}.setter
    def {name}(self, value):
        #self.args['{name}'].value = value
        #self.set_args_from_dict({{'{name}': value}})
        self.update({{'{name}': value}})
"""


def compile(meta):
    destination_file = utils.read_config()['resources-compiled-file']

    resource.prepare_meta(meta)
    meta['class_name'] = '{}Resource'.format(
        inflection.camelize(meta['base_name'])
    )
    meta['meta_actions'] = pprint.pformat(meta['actions'])
    meta['meta_input'] = pprint.pformat(meta['input'])

    print meta['base_name'], meta['class_name']

    if not os.path.exists(destination_file):
        with open(destination_file, 'w') as f:
            f.write(RESOURCE_HEADER_TEMPLATE.format(**meta))

    with open(destination_file, 'a') as f:
        input_properties = '\n'.join(
            RESOURCE_INPUT_PROPERTY_TEMPLATE.format(name=name)
            for name in meta['input']
        )
        f.write(RESOURCE_CLASS_TEMPLATE.format(
            input_properties=input_properties, **meta)
        )
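Putting the templates together, the generated `resources_compiled.py` contains one such class per resource. As a sketch only, for a hypothetical resource named `example` with a single `ip` input (paths and values below are illustrative), the emitted code would look roughly like:

```
from solar.core.resource import Resource


class ExampleResource(Resource):
    # _metadata is baked in from meta.yaml at compile time (illustrative values).
    _metadata = {
        'actions': {'run': 'run.pp'},
        'actions_path': '/vagrant/resources/example/actions',
        'base_path': '/vagrant/resources/example',
        'input': {'ip': {'schema': 'str!', 'value': None}},
        'handler': 'puppet',
    }

    @property
    def ip(self):
        return self.args['ip']

    @ip.setter
    def ip(self, value):
        self.update({'ip': value})
```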
@@ -17,9 +17,16 @@ db = get_db()
 
 
 class Resource(object):
+    _metadata = {}
+
     def __init__(self, name, metadata, args, tags=None, virtual_resource=None):
         self.name = name
-        self.metadata = metadata
+        if metadata:
+            self.metadata = metadata
+        else:
+            self.metadata = deepcopy(self._metadata)
+
+        self.metadata['id'] = name
 
         self.tags = tags or []
         self.virtual_resource = virtual_resource
@@ -8,7 +8,7 @@ from jinja2 import Template, Environment, meta
 
 from solar import utils
 from solar.core import validation
-from solar.core import resource as resource_module
+from solar.core.resource import load_all, Resource
 from solar.core import provider
 from solar.core import signals
 
@@ -23,17 +23,17 @@ def create_resource(name, base_path, args, virtual_resource=None):
     metadata = utils.yaml_load(base_meta_file)
     metadata['id'] = name
     metadata['version'] = '1.0.0'
-    metadata['actions'] = {}
-    metadata['actions_path'] = actions_path
     metadata['base_path'] = os.path.abspath(base_path)
 
+    prepare_meta(metadata)
+
     if os.path.exists(actions_path):
         for f in os.listdir(actions_path):
             metadata['actions'][os.path.splitext(f)[0]] = f
 
     tags = metadata.get('tags', [])
 
-    resource = resource_module.Resource(name, metadata, args, tags, virtual_resource)
+    resource = Resource(name, metadata, args, tags, virtual_resource)
     return resource
@@ -54,7 +54,7 @@ def create_virtual_resource(vr_name, template):
             emitter, src = arg.split('::')
             connections.append((emitter, name, {src: key}))
 
-    db = resource_module.load_all()
+    db = load_all()
     for emitter, reciver, mapping in connections:
         emitter = db[emitter]
         reciver = db[reciver]
@@ -83,11 +83,22 @@ def create(name, base_path, kwargs, virtual_resource=None):
     return resources
 
 
+def prepare_meta(meta):
+    actions_path = os.path.join(meta['base_path'], 'actions')
+    meta['actions_path'] = actions_path
+    meta['base_name'] = os.path.split(meta['base_path'])[-1]
+
+    meta['actions'] = {}
+    if os.path.exists(meta['actions_path']):
+        for f in os.listdir(meta['actions_path']):
+            meta['actions'][os.path.splitext(f)[0]] = f
+
+
 def validate_resources():
-    db = resource_module.load_all()
+    db = load_all()
     all_errors = []
     for r in db.values():
-        if not isinstance(r, resource_module.Resource):
+        if not isinstance(r, Resource):
             continue
 
         errors = validation.validate_resource(r)
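`prepare_meta` now owns the directory-derived fields, so the CLI `compile_all` command and `create_resource` can share one implementation. A small sketch of what it fills in, for a hypothetical resource directory (paths and action files below are illustrative):

```
from solar.core.resource import prepare_meta

# Hypothetical layout: <base_path>/meta.yaml plus an actions/ directory
# containing run.pp and remove.pp.
meta = {'base_path': '/vagrant/resources/rabbitmq_service'}
prepare_meta(meta)
# meta['base_name']    -> 'rabbitmq_service'
# meta['actions_path'] -> '/vagrant/resources/rabbitmq_service/actions'
# meta['actions']      -> {'run': 'run.pp', 'remove': 'remove.pp'}
```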
@@ -101,7 +112,7 @@ def find_inputs_without_source():
 
     :return: [(resource_name, input_name)]
     """
-    resources = resource_module.load_all()
+    resources = load_all()
 
     ret = set([(r.name, input_name) for r in resources.values()
                for input_name in r.args])
@@ -132,7 +143,7 @@ def find_missing_connections():
     """
     ret = set()
 
-    resources = resource_module.load_all()
+    resources = load_all()
 
     inputs_without_source = find_inputs_without_source()