Merge branch 'master' into librarian

Conflicts:
	examples/openstack/openstack.py
Łukasz Oleś 2015-10-20 13:24:58 +02:00
commit 8c1dc6623a
39 changed files with 925 additions and 34 deletions

.gitignore
@@ -41,3 +41,7 @@ vagrant-settings.yaml
 .tox
 solar/.coverage
+# pytest cache
+solar/.cache

Vagrantfile
@@ -32,8 +32,10 @@ end
 SLAVES_COUNT = cfg["slaves_count"]
 SLAVES_RAM = cfg["slaves_ram"]
+SLAVES_IPS = cfg["slaves_ips"]
 SLAVES_IMAGE = cfg["slaves_image"]
 MASTER_RAM = cfg["master_ram"]
+MASTER_IPS = cfg["master_ips"]
 MASTER_IMAGE = cfg["master_image"]
 SYNC_TYPE = cfg["sync_type"]
 MASTER_CPUS = cfg["master_cpus"]
@@ -69,7 +71,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
    config.vm.provision "shell", inline: master_pxe, privileged: true unless PREPROVISIONED
    config.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private"
    config.vm.provision "file", source: "bootstrap/ansible.cfg", destination: "/home/vagrant/.ansible.cfg"
-    config.vm.network "private_network", ip: "10.0.0.2"
    config.vm.host_name = "solar-dev"

    config.vm.provider :virtualbox do |v|
@@ -104,6 +105,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
      config.vm.synced_folder ".", "/vagrant", rsync: "nfs",
        rsync__args: ["--verbose", "--archive", "--delete", "-z"]
    end
+    ind = 0
+    MASTER_IPS.each do |ip|
+      config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
+      ind = ind + 1
+    end
  end

  SLAVES_COUNT.times do |i|
@@ -120,7 +127,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
        config.vm.provision "shell", inline: slave_script, privileged: true
        config.vm.provision "shell", inline: solar_script, privileged: true
        config.vm.provision "shell", inline: slave_celery, privileged: true
-        config.vm.network "private_network", ip: "10.0.0.#{ip_index}"
+        #TODO(bogdando) figure out how to configure multiple interfaces when was not PREPROVISIONED
+        ind = 0
+        SLAVES_IPS.each do |ip|
+          config.vm.network :private_network, ip: "#{ip}#{ip_index}", :dev => "solbr#{ind}", :mode => 'nat'
+          ind = ind + 1
+        end
      else
        # Disable attempts to install guest os and check that node is booted using ssh,
        # because nodes will have ip addresses from dhcp, and vagrant doesn't know
@@ -165,7 +177,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
          rsync__args: ["--verbose", "--archive", "--delete", "-z"]
      end
    end
  end
end

@@ -30,6 +30,8 @@
   - python-virtualenv
   # Required by packer
   - build-essential
+  # for torrent transport
+  - python-libtorrent

 # PIP
 #- apt: name=python-pip state=absent

@@ -9,20 +9,26 @@ vagrant plugin install vagrant-libvirt
 If you do not have already vagrant box for VirtualBox, install it:

 ```bash
-vagrant box add cgenie/solar-master
+vagrant box add solar-project/solar-master
 ```

 To use this box in libvirt you need to convert it using `vagrant-mutate` plugin:

 ```bash
 vagrant plugin install vagrant-mutate
-vagrant mutate cgenie/solar-master libvirt
+vagrant mutate solar-project/solar-master libvirt
 ```

-You can also change `sync_type` in your custom `vagrant-settings.yml` file.
+You can also change `sync_type` in your custom `vagrant-settings.yaml` file
+copied from the `vagrant-settings.yaml_defaults`.

 # Use solar

 ```bash
 vagrant up --provider libvirt
 ```
+
+(TODO automation required) After that, copy (or create, if missing) the ssh
+private keys for nodes to the `.vagrant/machines/solar-dev*/virtualbox` dirs.
+And make sure the public keys are listed in the `authorized_keys` files for the
+`solar-dev*` nodes.
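A possible manual sequence for the TODO above, as a sketch only: it assumes the boxes accept the stock Vagrant insecure key, which is the same `~/.vagrant.d/insecure_private_key` already provisioned onto the master in the Vagrantfile change earlier in this commit.

```bash
# Sketch: place the insecure Vagrant key as each node's private key.
# The .vagrant/machines/solar-dev*/virtualbox dirs are the ones
# referenced above; adjust node names/count to your setup.
for machine in .vagrant/machines/solar-dev*/virtualbox; do
  cp ~/.vagrant.d/insecure_private_key "$machine/private_key"
done
```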

@@ -22,3 +22,9 @@ debug info.
 ```
 solar res action run ceph_mon1
 ```
+
+To add repositories use
+
+```
+solar resource create apt1 templates/mos_repos.yaml node=node1 index=1
+```

@@ -53,11 +53,15 @@ def deploy():
         'role': 'controller',
     })[0]

+    managed_apt = vr.create(
+        'managed_apt1', 'templates/mos_repos.yaml',
+        {'node': first_node.name, 'index': 0})[-1]
+
     keys.connect(ceph_mon, {})
     first_node.connect(ceph_mon,
                        {'ip': ['ip', 'public_vip', 'management_vip']})
     library.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
+    managed_apt.connect(ceph_mon, {})


 if __name__ == '__main__':
     deploy()

@@ -49,6 +49,9 @@ def setup_base():
     resources = vr.create('nodes', 'templates/nodes_with_transports.yaml', {"count": 2})
     nodes = [x for x in resources if x.name.startswith('node')]
     node1, node2 = nodes
+    resources = vr.create('nodes_network', 'templates/nodes_network.yaml', {"count": 2})
+    nodes_sdn = [x for x in resources if x.name.startswith('node')]
+    node1_sdn, node2_sdn = nodes_sdn

     # LIBRARIAN
     librarian_node1 = vr.create('librarian_node1', 'resources/librarian', {})[0]
@@ -57,6 +60,16 @@ def setup_base():
     node1.connect(librarian_node1, {})
     node2.connect(librarian_node2, {})

+    # NETWORKING
+    # TODO(bogdando) node's IPs should be populated as br-mgmt IPs, but now are hardcoded in templates
+    signals.connect(node1, node1_sdn)
+    node1_sdn.connect_with_events(librarian_node1, {'module': 'modules'}, {})
+    evapi.add_dep(librarian_node1.name, node1_sdn.name, actions=('run',))
+
+    signals.connect(node2, node2_sdn)
+    node2_sdn.connect_with_events(librarian_node2, {'module': 'modules'}, {})
+    evapi.add_dep(librarian_node2.name, node2_sdn.name, actions=('run',))
+
     # MARIADB
     mariadb_service = vr.create('mariadb_service1', 'resources/mariadb_service', {
         'image': 'mariadb',
@@ -89,6 +102,8 @@ def setup_base():
     })

     return {'node1': node1,
             'node2': node2,
+            'node1_sdn': node1_sdn,
+            'node2_sdn': node2_sdn,
             'librarian_node1': librarian_node1,
             'librarian_node2': librarian_node2,
             'mariadb_service': mariadb_service,

@@ -0,0 +1,25 @@
Example of using the torrent transport with solar. Torrent is used to
distribute task data. After fetching is finished, the torrent client forks
and continues seeding.

The example contains a single node with a single host mapping + transports.

Execute:
```
python examples/torrent/example.py
solar changes stage
solar changes process
solar orch run-once last
```

Wait for it to finish:
```
solar orch report last -w 100
```

After this you should see a new entry in the `/etc/hosts` file.

* All created torrents are in `/vagrant/torrents`; the directory doesn't need to be shared
* Initial seeding is done using the torrent file
* Downloading and then seeding is always done with magnetlinks

@@ -0,0 +1,74 @@
import time

from solar.core.resource import virtual_resource as vr
from solar import errors
from solar.interfaces.db import get_db

db = get_db()


def run():
    db.clear()

    node = vr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()),
                                                   'ip': '10.0.0.3',
                                                   'node_id': 'node1',
                                                   })[0]

    transports = vr.create('transports_node1', 'resources/transports')[0]
    ssh_transport = vr.create('ssh_transport', 'resources/transport_ssh',
                              {'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key',
                               'ssh_user': 'vagrant'})[0]

    transports.connect(node, {})

    # it uses reverse mappings
    ssh_transport.connect(transports, {'ssh_key': 'transports:key',
                                       'ssh_user': 'transports:user',
                                       'ssh_port': 'transports:port',
                                       'name': 'transports:name'})

    hosts = vr.create('hosts_file', 'resources/hosts_file', {})[0]

    # let's add torrent transport for hosts file deployment (useless in real life)

    torrent_transport = vr.create('torrent_transport',
                                  'resources/transport_torrent',
                                  {'trackers': ['udp://open.demonii.com:1337',
                                                'udp://tracker.openbittorrent.com:80']})[0]
    # you could use any trackers as you want

    transports_for_torrent = vr.create(
        'transports_for_torrent', 'resources/transports')[0]

    transports_for_torrent.connect(torrent_transport, {})

    ssh_transport.connect_with_events(transports_for_torrent, {'ssh_key': 'transports:key',
                                                               'ssh_user': 'transports:user',
                                                               'ssh_port': 'transports:port',
                                                               'name': 'transports:name'},
                                      events={})

    transports_for_hosts = vr.create(
        'transports_for_hosts', 'resources/transports')[0]

    torrent_transport.connect(transports_for_hosts, {'trackers': 'transports:trackers',
                                                     'name': 'transports:name'})

    ssh_transport.connect(transports_for_hosts, {'ssh_key': 'transports:key',
                                                 'ssh_user': 'transports:user',
                                                 'ssh_port': 'transports:port',
                                                 'name': 'transports:name'})

    transports_for_hosts.connect(hosts)
    transports_for_hosts.connect_with_events(node, events={})

    node.connect(hosts, {
        'ip': 'hosts:ip',
        'name': 'hosts:name'
    })


run()

@@ -0,0 +1,9 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: rm -f {{item}}
      with_items:
        - /etc/apt/sources.list.d/{{name}}.list
        - /etc/apt/preferences.d/{{name}}.pref
    - shell: apt-get update
      when: {{validate_integrity}}

@@ -0,0 +1,11 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - template:
        src: {{templates_dir}}/source
        dest: /etc/apt/sources.list.d/{{name}}.list
    - template:
        src: {{templates_dir}}/preferences
        dest: /etc/apt/preferences.d/{{name}}.pref
    - shell: apt-get update
      when: {{validate_integrity}}

@@ -0,0 +1,25 @@
id: apt_repo
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  repo:
    schema: str!
    value:
  name:
    schema: str!
    value:
  package:
    schema: str
    value: '*'
  pin:
    schema: str
    value:
  pin_priority:
    schema: int
    value:
  validate_integrity:
    schema: bool
    value: true

@@ -0,0 +1,3 @@
Package: {{package}}
Pin: {{pin}}
Pin-Priority: {{pin_priority}}

@@ -0,0 +1 @@
{{repo}}

@@ -0,0 +1,7 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: echo 'Managed by solar' > /etc/apt/sources.list
      when: {{ensure_other_removed}}
    - shell: apt-get update
      when: {{ensure_other_removed}}

@@ -0,0 +1,17 @@
# This resource will clean
id: apt_repo_manager
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  repos:
    schema: [str!]
    value:
  names:
    schema: [str!]
    value:
  ensure_other_removed:
    schema: bool
    value: true

@@ -0,0 +1,9 @@
# Node network resource for puppet handler

Sets up and configures L23 networking for a node.
Leverages the powerful network_scheme structures to
create all required networking entities like interfaces,
bridges, bonds - both Linux and OVS based.
Defaults are given for the Debian OS family.

Source: https://github.com/xenolog/l23network
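For reference, instantiating this resource via a template could look like the sketch below. The `templates/nodes_network.yaml` path and the `count` input mirror the `vr.create('nodes_network', ...)` call in the example diff above; the exact CLI arguments are an assumption patterned on the `apt1` example earlier in this commit.

```bash
# Sketch: create the node networking resources from the template,
# then stage and run the changes (commands as used elsewhere in this commit).
solar resource create nodes_network templates/nodes_network.yaml count=2
solar changes stage
solar changes process
solar orch run-once last
```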

@@ -0,0 +1,3 @@
class {'l23network':
  ensure_package => 'absent',
}

@@ -0,0 +1,41 @@
$resource = hiera($::resource_name)

$ensure_package = $resource['input']['ensure_package']['value']
$use_lnx = $resource['input']['use_lnx']['value']
$use_ovs = $resource['input']['use_ovs']['value']
$install_ovs = $resource['input']['install_ovs']['value']
$install_brtool = $resource['input']['install_brtool']['value']
$install_ethtool = $resource['input']['install_ethtool']['value']
$install_bondtool = $resource['input']['install_bondtool']['value']
$install_vlantool = $resource['input']['install_vlantool']['value']
$ovs_modname = $resource['input']['ovs_modname']['value']
$ovs_datapath_package_name = $resource['input']['ovs_datapath_package_name']['value']
$ovs_common_package_name = $resource['input']['ovs_common_package_name']['value']
$network_scheme = $resource['input']['network_scheme']['value']

class {'l23network':
  ensure_package            => $ensure_package,
  use_lnx                   => $use_lnx,
  use_ovs                   => $use_ovs,
  install_ovs               => $install_ovs,
  install_brtool            => $install_brtool,
  install_ethtool           => $install_ethtool,
  install_bondtool          => $install_bondtool,
  install_vlantool          => $install_vlantool,
  ovs_modname               => $ovs_modname,
  ovs_datapath_package_name => $ovs_datapath_package_name,
  ovs_common_package_name   => $ovs_common_package_name,
}

prepare_network_config($network_scheme)
$sdn = generate_network_config()
notify { $sdn: require => Class['l23network'], }

# We need to wait at least 30 seconds for the bridges and other interfaces to
# come up after being created. This should allow for all interfaces to be up
# and ready for traffic before proceeding with further deploy steps. LP#1458954
exec { 'wait-for-interfaces':
  path    => '/usr/bin:/bin',
  command => 'sleep 32',
  require => Notify[$sdn],
}

@@ -0,0 +1,60 @@
id: node_network_puppet
handler: puppet
puppet_module: l23network
version: 1.0.0
input:
  package_ensure:
    schema: str
    value: 'present'
  ensure_package:
    schema: str
    value: 'present'
  use_lnx:
    schema: bool
    value: true
  use_ovs:
    schema: bool
    value: false
  install_ovs:
    schema: bool
    value: true
  install_brtool:
    schema: bool
    value: true
  install_ethtool:
    schema: bool
    value: true
  install_bondtool:
    schema: bool
    value: true
  install_vlantool:
    schema: bool
    value: true
  ovs_modname:
    schema: str
    value: 'openvswitch'
  ovs_datapath_package_name:
    schema: str
    value: 'openvswitch-datapath-dkms'
  ovs_common_package_name:
    schema: str
    value: 'openvswitch-switch'
  network_scheme:
    schema: {
      version: str!,
      provider: str!,
      interfaces: {},
      transformations: [{}],
      endpoints: {},
      roles: {},
    }
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/xenolog/l23network', branch: '50098cfa1f0f8e8d58e6a6b77a22f1380aa5c426'}
  ip:
    schema: str!
    value:

tags: [resources/node_network]

@@ -0,0 +1,11 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing node_network_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    # TODO(bogdando) figure out how to test this
    # )

@@ -0,0 +1,9 @@
- hosts: [{{ host }}]
  sudo: yes
  tasks:
    - apt:
        name: python-libtorrent
        state: present
    - copy:
        src: {{scripts_dir}}/solar_torrent.py
        dest: /var/tmp/solar_torrent.py

@@ -0,0 +1,18 @@
id: transport_torrent
handler: ansible
input:
  trackers:
    schema: [str!]
    value: []
  name:
    schema: str!
    value: torrent
  location_id:
    schema: str
    value:
    reverse: True
    is_own: False
  transports_id:
    schema: str
    value:
    is_emit: False

@@ -0,0 +1 @@
../../../solar/solar/core/transports/helpers/solar_torrent.py

@@ -1,7 +1,7 @@
 id: transports
 input:
   transports:
-    schema: [{user: str, password: str, port: int!, key: str, name: str!}]
+    schema: [{user: str, password: str, port: int!, key: str, name: str!, trackers: [str]}]
     value: []
   transports_id:
     schema: str!

@@ -1 +0,0 @@
-{}

@@ -179,7 +179,7 @@ def dg(uid, start, end):
     if errors:
         raise click.ClickException('\n'.join(errors))
     utils.write_graph(plan)
-    click.echo('Created {name}.png'.format(name=plan.graph['name']))
+    click.echo('Created {name}.svg'.format(name=plan.graph['name']))


 @orchestration.command()

@@ -17,7 +17,7 @@ from fabric.state import env
 import os

 from solar.core.log import log
-from solar.core.handlers.base import TempFileHandler
+from solar.core.handlers.base import TempFileHandler, SOLAR_TEMP_LOCAL_LOCATION
 from solar import errors

 # otherwise fabric will sys.exit(1) in case of errors
@@ -41,7 +41,11 @@ class AnsibleTemplate(TempFileHandler):
         self.transport_sync.copy(resource, '/vagrant/library', '/tmp')
         self.transport_sync.sync_all()

-        call_args = ['ansible-playbook', '--module-path', '/tmp/library', '-i', inventory_file, playbook_file]
+        # remote paths are not nested inside solar_local
+        remote_playbook_file = playbook_file.replace(SOLAR_TEMP_LOCAL_LOCATION, '/tmp/')
+        remote_inventory_file = inventory_file.replace(SOLAR_TEMP_LOCAL_LOCATION, '/tmp/')
+        call_args = ['ansible-playbook', '--module-path', '/tmp/library', '-i', remote_inventory_file, remote_playbook_file]

         log.debug('EXECUTING: %s', ' '.join(call_args))
         out = self.transport_run.run(resource, *call_args)

@@ -16,6 +16,7 @@
 import os
 import shutil
 import tempfile
+import errno

 from jinja2 import Template

@@ -23,6 +24,11 @@ from solar.core.log import log
 from solar.core.transports.ssh import SSHSyncTransport, SSHRunTransport

+tempfile.gettempdir()
+SOLAR_TEMP_LOCAL_LOCATION = os.path.join(tempfile.tempdir, 'solar_local')
+

 class BaseHandler(object):

     def __init__(self, resources, handlers=None):
@@ -46,9 +52,17 @@ class BaseHandler(object):

 class TempFileHandler(BaseHandler):
     def __init__(self, resources, handlers=None):
         super(TempFileHandler, self).__init__(resources, handlers)
-        self.dst = tempfile.mkdtemp()
+        self.dst = None

     def __enter__(self):
+        try:
+            self.dst = tempfile.mkdtemp(dir=SOLAR_TEMP_LOCAL_LOCATION)
+        except OSError as ex:
+            if ex.errno == errno.ENOENT:
+                os.makedirs(SOLAR_TEMP_LOCAL_LOCATION)
+                self.dst = tempfile.mkdtemp(dir=SOLAR_TEMP_LOCAL_LOCATION)
+            else:
+                raise
         self.dirs = {}
         for resource in self.resources:
             resource_dir = tempfile.mkdtemp(suffix=resource.name, dir=self.dst)

@@ -15,8 +15,9 @@
 from solar.core.log import log
 from solar import errors

+import os
+
-from solar.core.handlers.base import TempFileHandler
+from solar.core.handlers.base import TempFileHandler, SOLAR_TEMP_LOCAL_LOCATION


 class Shell(TempFileHandler):
@@ -24,9 +25,14 @@ class Shell(TempFileHandler):
         action_file = self._compile_action_file(resource, action_name)
         log.debug('action_file: %s', action_file)

-        action_file_name = '/tmp/{}.sh'.format(resource.name)
-        self.transport_sync.copy(resource, action_file, action_file_name)
+        action_file_name = os.path.join(self.dirs[resource.name], action_file)
+        action_file_name = action_file_name.replace(SOLAR_TEMP_LOCAL_LOCATION, '/tmp/')
+        self._copy_templates_and_scripts(resource, action_name)
+        self.transport_sync.copy(resource, self.dst, '/tmp')
         self.transport_sync.sync_all()
         cmd = self.transport_run.run(
             resource,
             'bash', action_file_name,

@@ -83,16 +83,23 @@ class SolarTransport(object):
         pass

     def get_transport_data(self, resource, name=None):
+        key = '_used_transport_%s' % self._mode
         # TODO: naive object local cache
         try:
-            transport = resource._used_transport
+            transport = getattr(resource, key)
         except AttributeError:
             if name is None:
                 name = self.preffered_transport_name
             transport = next(x for x in resource.transports() if x['name'] == name)
-            setattr(resource, '_used_transport', transport)
+            setattr(resource, key, transport)
         return transport

+    def other(self, resource):
+        return self._other
+
+    def bind_with(self, other):
+        self._other = other
+

 class SyncTransport(SolarTransport):

     """
@@ -106,11 +113,6 @@ class SyncTransport(SolarTransport):
         super(SyncTransport, self).__init__()
         self.executors = []

-    def bind_with(self, other):
-        # we migth add there something later
-        # like compat checking etc
-        self.other = other
-
     def copy(self, resource, *args, **kwargs):
         pass

@@ -157,11 +159,6 @@ class RunTransport(SolarTransport):
     def get_result(self, *args, **kwargs):
         raise NotImplementedError()

-    def bind_with(self, other):
-        # we migth add there something later
-        # like compat checking etc
-        self.other = other
-
     def run(self, resource, *args, **kwargs):
         pass

@@ -1,21 +1,39 @@
 from solar.core.transports.base import SyncTransport, RunTransport, SolarTransport
 from solar.core.transports.ssh import SSHSyncTransport, SSHRunTransport
 from solar.core.transports.rsync import RsyncSyncTransport
-from solar.core.transports.solard_transport import SolardRunTransport, SolardSyncTransport
+try:
+    from solar.core.transports.solard_transport import SolardRunTransport, SolardSyncTransport
+except ImportError:
+    _solard_available = False
+else:
+    _solard_available = True
+
+try:
+    from solar.core.transports.torrent import TorrentSyncTransport
+except ImportError:
+    _torrent_available = False
+else:
+    _torrent_available = True
+

 KNOWN_SYNC_TRANSPORTS = {
-    'solard': SolardSyncTransport,
     'rsync': RsyncSyncTransport,
     'ssh': SSHSyncTransport
 }

 KNOWN_RUN_TRANSPORTS = {
-    'solard': SolardRunTransport,
     'ssh': SSHRunTransport
 }

+if _torrent_available:
+    KNOWN_SYNC_TRANSPORTS['torrent'] = TorrentSyncTransport
+
+if _solard_available:
+    KNOWN_SYNC_TRANSPORTS['solard'] = SolardSyncTransport
+    KNOWN_RUN_TRANSPORTS['solard'] = SolardRunTransport
+

 class OnAll(object):

     def __init__(self, target):
@@ -50,9 +68,10 @@ class BatTransport(SolarTransport):
         if not selected:
             raise Exception("No valid transport found")
         instance = self._bat_transports[selected['name']]()
-        setattr(resource, '_used_transport', selected)
+        setattr(resource, '_used_transport_%s' % instance._mode, selected)
         setattr(resource, key_name, instance)
         self._used_transports.append(instance)
+        instance.bind_with(self._other_remember)
         return instance
         # return self._bat_transports[selected['name']]

@@ -60,11 +79,14 @@ class BatTransport(SolarTransport):
         self.select_valid_transport(resource)
         return super(BatTransport, self).get_transport_data(resource, *args, **kwargs)

+    def bind_with(self, other):
+        self._other_remember = other
+

 class BatSyncTransport(SyncTransport, BatTransport):

     preffered_transport_name = None
-    _order = ('solard', 'rsync', 'ssh')
+    _order = ('torrent', 'solard', 'rsync', 'ssh')
     _bat_transports = KNOWN_SYNC_TRANSPORTS

     def __init__(self, *args, **kwargs):

@@ -0,0 +1 @@

@@ -0,0 +1,173 @@
# TODO: change to something less naive
#

import libtorrent as lt

from operator import attrgetter
import time
import sys
import os

state_str = ['queued', 'checking', 'downloading metadata', \
    'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume']


class MultiTorrent(object):

    def __init__(self, torrents, ses):
        self.torrents = torrents
        self.ses = ses

    def force_reannounce(self):
        for torrent in self.torrents:
            torrent.force_reannounce()

    @property
    def is_seeding(self):
        for torrent in self.torrents:
            status = torrent.status()
            if state_str[status.state] != 'seeding':
                return False
        return True

    @property
    def progress(self):
        total_progress = map(attrgetter('progress'), map(lambda x: x.status(), self.torrents))
        return sum(total_progress) / len(total_progress)

    def numbers(self):
        seeding = 0
        downloading = 0
        for torrent in self.torrents:
            if torrent.status().is_seeding:
                seeding += 1
            else:
                downloading += 1
        return seeding, downloading


def init_session(args, seed=False):
    ses = lt.session()
    all_torrents = []
    for save_path, magnet_or_path in args:
        if os.path.exists(magnet_or_path):
            e = lt.bdecode(open(magnet_or_path, 'rb').read())
            info = lt.torrent_info(e)
            params = {'save_path': save_path,
                      'storage_mode': lt.storage_mode_t.storage_mode_sparse,
                      'ti': info,
                      'seed_mode': seed}
            h = ses.add_torrent(params)
        else:
            h = ses.add_torrent({
                'save_path': save_path,
                'storage_mode': lt.storage_mode_t.storage_mode_sparse,
                'url': magnet_or_path,
                'seed_mode': seed})
        all_torrents.append(h)
    return ses, all_torrents


def _daemonize():
    # should be true daemonize
    new_pid = os.fork()
    if new_pid > 0:
        # first
        sys.exit(0)
    os.setsid()
    new_pid2 = os.fork()
    if new_pid2 > 0:
        sys.exit(0)
    stdin = file(os.devnull, 'r')
    stdout = file(os.devnull, 'a+')
    stderr = file(os.devnull, 'a+', 0)
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())


def _seeder(torrents, save_path='.', max_seed_ratio=5):
    _daemonize()
    no_peers = 120
    max_alive = 5 * 60
    ses, all_torrents = init_session(torrents, seed=True)
    mt = MultiTorrent(all_torrents, ses)
    end = time.time() + max_alive
    peers_0 = time.time()
    i = 0
    while not time.time() > end:
        now = time.time()
        i += 1
        # if i % 10 == 0 and i != 0:
        #     mt.force_reannounce()
        s = ses.status()
        # if not mt.is_seeding:
        #     sys.exit("Was seeder mode but not seeding")
        if peers_0 < now - no_peers:
            sys.exit("No peers for %d seconds exiting" % no_peers)
        if i % 5 == 0:
            print "%.2f%% up=%.1f kB/s peers=%s total_upload_B=%.1f" \
                % (mt.progress * 100,
                   s.upload_rate / 1000,
                   s.num_peers,
                   (s.total_upload))
        if s.num_peers != 0:
            peers_0 = now
        sys.stdout.flush()
        time.sleep(1)
    else:
        print 'Seed timeout exiting'
        sys.exit(0)


def _getter(torrents, max_seed_ratio=3):
    ses = lt.session()
    ses.listen_on(6881, 6981)
    max_no_changes = 1 * 60
    ses, all_torrents = init_session(torrents)
    mt = MultiTorrent(all_torrents, ses)
    i = 0
    last_state = (time.time(), None)
    while (not mt.is_seeding):
        i += 1
        # if i % 10 == 0 and i != 0:
        #     mt.force_reannounce()
        s = ses.status()
        if i % 5 == 0:
            print '%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d) %s' % \
                (mt.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
                 s.num_peers, mt.numbers())
        now = time.time()
        current_state = (now, mt.progress)
        if current_state[-1] != last_state[-1]:
            last_state = current_state
        if last_state[0] < now - max_no_changes:
            sys.exit("Failed to fetch torrents in %ds" % max_no_changes)
        time.sleep(0.5)
    if mt.progress == 1:
        # ok
        # torrent lib dislikes forks there
        from subprocess import check_output
        import sys
        args = sys.argv[:]
        args[-2] = 's'
        args.insert(0, sys.executable)
        print "Entering seeder mode"
        check_output(args, shell=False)
    else:
        # err
        sys.exit(1)


if __name__ == '__main__':
    mode = sys.argv[1]
    torrents = sys.argv[2]
    torrents = [x.split('|') for x in torrents.split(';')]
    print repr(torrents)
    if mode == 'g':
        _getter(torrents, *sys.argv[3:])
    elif mode == 's':
        _seeder(torrents, *sys.argv[3:])
    else:
        sys.exit("`s` or `g` needed")

@@ -0,0 +1,118 @@
from solar.core.log import log
from solar.core.transports.ssh import (SSHSyncTransport,
                                       SSHRunTransport)
from solar.core.transports.base import SyncTransport, Executor

import errno
from collections import defaultdict
from operator import attrgetter, itemgetter
import libtorrent as lt
import os
from uuid import uuid4


class TorrentSyncTransport(SyncTransport):

    def __init__(self):
        super(TorrentSyncTransport, self).__init__()
        # we need some non torrent based sync transfer to upload client
        self._sync_helper = SSHSyncTransport()
        self._torrents = []
        self._sudo_torrents = []
        self._torrent_path = '/vagrant/torrents'

    def bind_with(self, other):
        self._sync_helper.bind_with(other)
        super(TorrentSyncTransport, self).bind_with(other)

    def copy(self, resource, _from, _to, use_sudo=False):
        log.debug("TORRENT: %s -> %s", _from, _to)

        executor = Executor(resource=resource,
                            executor=None,
                            params=(_from, _to, use_sudo))
        self.executors.append(executor)

    def _create_single_torrent(self, resource, _from, _to, use_sudo):
        fs = lt.file_storage()
        lt.add_files(fs, _from)
        self._create_torrent(resource, fs, _from)

    def _create_torrent_name(self):
        return os.path.join(self._torrent_path, uuid4().hex + '.torrent')

    def _create_torrent(self, resource, fs, root='.', use_sudo=False):
        t = lt.create_torrent(fs)
        transports = resource.transports()
        torrent_transport = next((x for x in transports if x['name'] == 'torrent'))
        trackers = torrent_transport['trackers']
        for tracker in trackers:
            t.add_tracker(tracker)
        lt.set_piece_hashes(t, os.path.join(root, '..'))
        torrent = t.generate()
        torrent['priv'] = True  # private torrent, no DHT, only trackers
        name = self._create_torrent_name()
        try:
            # not checking for path existence
            with open(name, 'wb') as f:
                f.write(lt.bencode(torrent))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            os.makedirs(self._torrent_path)
            with open(name, 'wb') as f:
                f.write(lt.bencode(torrent))
        log.debug("Created torrent file %s", name)
        magnet_uri = lt.make_magnet_uri(lt.torrent_info(name))
        # self._torrents[root] = (name, magnet_uri)
        if not use_sudo:
            self._torrents.append((name, magnet_uri, root))
        else:
            self._sudo_torrents.append((name, magnet_uri, root))
        return name

    def _start_seeding(self):
        # XXX: naive naive naive
        # we don't need use sudo there for now
        from fabric import api as fabric_api
        torrents = self._torrents + self._sudo_torrents
        to_seed = ["%s|%s" % (os.path.abspath(os.path.join(x[2], '..')), x[0]) for x in torrents]
        seed_args = ';'.join(to_seed)
        # TODO: 'g' is just for debug, it should be 's', remove when sure
        cmd = ['/usr/bin/python',
               '/vagrant/solar/solar/core/transports/helpers/solar_torrent.py',
               'g',
               '"%s"' % seed_args]
        log.debug("Will start seeding: %r" % ' '.join(cmd))
        fabric_api.local(' '.join(cmd))
        log.debug("Torrent seeding started")

    def _start_remote_fetch(self, resource, use_sudo):
        # later we will send solar_torrent with other sync tranport,
        # or remote will have solar_torrent installed somehow
        if use_sudo is False:
            torrents = self._torrents
        else:
            torrents = self._sudo_torrents
        to_get = ["%s|%s" % (os.path.abspath(os.path.join(x[2], '..')), x[1]) for x in torrents]
        get_args = ';'.join(to_get)
        cmd = ['/usr/bin/python',
               '/var/tmp/solar_torrent.py',
               'g',
               '"%s"' % get_args]
        self.other(resource).run(resource, *cmd, use_sudo=use_sudo)

    def preprocess(self, executor):
        _from, _to, use_sudo = executor.params
        self._create_single_torrent(executor.resource, _from, _to, use_sudo)

    def run_all(self):
        self._start_seeding()
        resource = self.executors[0].resource
        # TODO: we should paralelize it
        if self._torrents:
            self._start_remote_fetch(resource, use_sudo=False)
        if self._sudo_torrents:
            self._start_remote_fetch(resource, use_sudo=True)

@@ -34,5 +34,5 @@ def write_graph(plan):
     nx.write_dot(plan, '{name}.dot'.format(name=plan.graph['name']))

     subprocess.call(
-        'tred {name}.dot | dot -Tpng -o {name}.png'.format(name=plan.graph['name']),
+        'tred {name}.dot | dot -Tsvg -o {name}.svg'.format(name=plan.graph['name']),
         shell=True)

templates/mos_repos.yaml
@@ -0,0 +1,43 @@
id: mos_repos
resources:
  - id: mos_holdback_{{index}}
    from: resources/apt_repo
    location: {{node}}
    values:
      name: mos-holdback
      package: '*'
      repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ mos7.0-holdback main restricted
      pin: release o=Mirantis,n=mos7.0,a=mos7.0-holdback,l=mos7.0
      pin_priority: 1100
  - id: mos_security_{{index}}
    from: resources/apt_repo
    location: {{node}}
    values:
      name: mos
      package: '*'
      repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ mos7.0-security main restricted
      pin: release o=Mirantis,n=mos7.0,a=mos7.0-security,l=mos7.0
      pin_priority: 1050
  - id: mos_updates_{{index}}
    from: resources/apt_repo
    location: {{node}}
    values:
      name: mos_update
      package: '*'
      repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ mos7.0-updates main restricted
      pin: release o=Mirantis,a=mos7.0-updates,l=mos7.0,n=mos7.0
      pin_priority: 1050
  - id: managed_apt_{{index}}
    from: resources/managed_apt
    location: {{node}}
    values:
      names:
        - mos_holdback_{{index}}::name
        - mos_security_{{index}}::name
        - mos_updates_{{index}}::name
      repos:
        - mos_holdback_{{index}}::repo
        - mos_security_{{index}}::repo
        - mos_updates_{{index}}::repo
      ensure_other_removed: false

@@ -0,0 +1,134 @@
id: simple_multinode_gre

# eth2 - private 10.1.0.0/24 with JUMBO frames,
# eth1 - mgmt 10.0.0.0/24,
# eth3 - ext 10.2.0.0/24

resources:
  - id: node1_sdn
    from: resources/node_network_puppet
    values:
      use_ovs: true
      network_scheme:
        version: '1.1'
        provider: lnx
        interfaces:
          eth3:
            mtu: 1500
          eth1:
            mtu: 1500
          eth2:
            mtu: 9000
        transformations:
          - action: add-br
            name: br-mgmt
          - action: add-br
            name: br-ex
          - action: add-br
            name: br-floating
            provider: ovs
          - action: add-patch
            bridges:
              - br-floating
              - br-ex
            provider: ovs
            mtu: 65000
          - action: add-br
            name: br-mesh
          - action: add-port
            bridge: br-ex
            name: eth3
          - action: add-port
            bridge: br-mgmt
            name: eth1
          - action: add-port
            bridge: br-mesh
            name: eth2
        endpoints:
          br-mesh:
            IP:
              - 10.1.0.3/24
          br-floating:
            IP: none
          br-mgmt:
            IP:
              - 10.0.0.3/24
            vendor_specific:
              phy_interfaces:
                - eth1
          br-ex:
            IP:
              - 10.2.0.3/24
            vendor_specific:
              phy_interfaces:
                - eth3
            #gateway: 10.2.0.1
        roles:
          management: br-mgmt
          neutron/mesh: br-mesh
          ex: br-ex
          neutron/floating: br-floating
          fw-admin: br-fw-admin
  - id: node2_sdn
    from: resources/node_network_puppet
    values:
      use_ovs: true
      network_scheme:
        version: '1.1'
        provider: lnx
        interfaces:
          eth3:
            mtu: 1500
          eth1:
            mtu: 1500
          eth2:
            mtu: 9000
        transformations:
          - action: add-br
            name: br-mgmt
          - action: add-br
            name: br-ex
          - action: add-br
            name: br-floating
            provider: ovs
          - action: add-patch
            bridges:
              - br-floating
              - br-ex
            provider: ovs
            mtu: 65000
          - action: add-br
            name: br-mesh
          - action: add-port
            bridge: br-ex
            name: eth3
          - action: add-port
            bridge: br-mgmt
            name: eth1
          - action: add-port
            bridge: br-mesh
            name: eth2
        endpoints:
          br-mesh:
            IP:
              - 10.1.0.4/24
          br-floating:
            IP: none
          br-mgmt:
            IP:
              - 10.0.0.4/24
            vendor_specific:
              phy_interfaces:
                - eth1
          br-ex:
            IP:
              - 10.2.0.4/24
            vendor_specific:
              phy_interfaces:
                - eth3
            #gateway: 10.2.0.1
        roles:
          management: br-mgmt
          neutron/mesh: br-mesh
          ex: br-ex
          neutron/floating: br-floating
          fw-admin: br-fw-admin

@@ -7,7 +7,15 @@ master_image: solar-project/solar-master
 slaves_image: solar-project/solar-master
 master_ram: 1024
 master_cpus: 1
+master_ips:
+  - 10.0.0.2
+  - 10.1.0.2
+  - 10.2.0.2
 slaves_cpus: 1
+slaves_ips:
+  - 10.0.0.
+  - 10.1.0.
+  - 10.2.0.

 # if you have virtualbox 5.x then enable it
 # if will speedup things a lot