From 29c20b354535cce2dcdb5fad8d17191e590c7c63 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 9 Jun 2015 15:59:43 +0300 Subject: [PATCH 1/8] Add tests with some fixes for changes stage procedure --- cli.py | 246 ++++++++++++++++++++++++++ solar/requirements.txt | 1 + solar/solar/interfaces/db/redis_db.py | 9 +- solar/solar/operations.py | 62 ++++--- solar/solar/state.py | 6 +- solar/solar/test/test_operations.py | 107 +++++++++++ 6 files changed, 402 insertions(+), 29 deletions(-) create mode 100755 cli.py create mode 100644 solar/solar/test/test_operations.py diff --git a/cli.py b/cli.py new file mode 100755 index 00000000..52509a4b --- /dev/null +++ b/cli.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python +import click +import json +#import matplotlib +#matplotlib.use('Agg') # don't show windows +#import matplotlib.pyplot as plt +import networkx as nx +import os +import subprocess + +from solar.core import actions as xa +from solar.core import resource as xr +from solar.core import signals as xs +from solar import operations +from solar import state + +from solar.interfaces.db import get_db + +db = get_db() + + +@click.group() +def cli(): + pass + + +def init_cli_resource(): + @click.group() + def resource(): + pass + + cli.add_command(resource) + + @click.command() + @click.argument('resource_path') + @click.argument('action_name') + def action(action_name, resource_path): + print 'action', resource_path, action_name + r = xr.load(resource_path) + xa.resource_action(r, action_name) + + resource.add_command(action) + + @click.command() + @click.argument('name') + @click.argument('base_path') + @click.argument('dest_path') + @click.argument('args') + def create(args, dest_path, base_path, name): + print 'create', name, base_path, dest_path, args + args = json.loads(args) + xr.create(name, base_path, dest_path, args) + + resource.add_command(create) + + @click.command() + @click.argument('resource_path') + @click.argument('tag_name') + @click.option('--add/--delete', default=True) + def tag(add, tag_name, resource_path): + print 'Tag', resource_path, tag_name, add + r = xr.load(resource_path) + if add: + r.add_tag(tag_name) + else: + r.remove_tag(tag_name) + r.save() + + resource.add_command(tag) + + @click.command() + @click.argument('path') + @click.option('--all/--one', default=False) + @click.option('--tag', default=None) + @click.option('--use-json/--no-use-json', default=False) + def show(use_json, tag, all, path): + import json + import six + + printer = lambda r: six.print_(r) + if use_json: + printer = lambda r: six.print_(json.dumps(r.to_dict())) + + if all or tag: + for name, resource in xr.load_all(path).items(): + show = True + if tag: + if tag not in resource.tags: + show = False + + if show: + printer(resource) + else: + printer(xr.load(path)) + + resource.add_command(show) + + @click.command() + @click.argument('name') + @click.argument('args') + def update(name, args): + args = json.loads(args) + all = xr.load_all() + r = all[name] + r.update(args) + resource.add_command(update) + + +def init_cli_connect(): + @click.command() + @click.argument('emitter') + @click.argument('receiver') + @click.option('--mapping', default=None) + def connect(mapping, receiver, emitter): + print 'Connect', emitter, receiver + emitter = xr.load(emitter) + receiver = xr.load(receiver) + print emitter + print receiver + if mapping is not None: + mapping = json.loads(mapping) + xs.connect(emitter, receiver, mapping=mapping) + + cli.add_command(connect) + + @click.command() + 
@click.argument('emitter') + @click.argument('receiver') + def disconnect(receiver, emitter): + print 'Disconnect', emitter, receiver + emitter = xr.load(emitter) + receiver = xr.load(receiver) + print emitter + print receiver + xs.disconnect(emitter, receiver) + + cli.add_command(disconnect) + + +def init_changes(): + @click.group() + def changes(): + pass + + cli.add_command(changes) + + @click.command() + def stage(): + log = operations.stage_changes() + print log.show() + + changes.add_command(stage) + + @click.command() + @click.option('--one', is_flag=True, default=False) + def commit(one): + if one: + operations.commit_one() + else: + operations.commit_changes() + + changes.add_command(commit) + + @click.command() + @click.option('--limit', default=5) + def history(limit): + print state.CL().show() + + changes.add_command(history) + + @click.command() + @click.option('--last', is_flag=True, default=False) + @click.option('--all', is_flag=True, default=False) + @click.option('--uid', default=None) + def rollback(last, all, uid): + if last: + print operations.rollback_last() + elif all: + print operations.rollback_all() + elif uid: + print operations.rollback_uid(uid) + + changes.add_command(rollback) + + @click.command() + @click.argument('uid') + def staged(uid): + for item in state.SL(): + if item.uid == uid: + print item + return + + changes.add_command(staged) + + +def init_cli_connections(): + @click.group() + def connections(): + pass + + cli.add_command(connections) + + @click.command() + def show(): + print json.dumps(xs.CLIENTS, indent=2) + + connections.add_command(show) + + # TODO: this requires graphing libraries + @click.command() + def graph(): + #g = xs.connection_graph() + g = xs.detailed_connection_graph() + + nx.write_dot(g, 'graph.dot') + subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) + + # Matplotlib + #pos = nx.spring_layout(g) + #nx.draw_networkx_nodes(g, pos) + #nx.draw_networkx_edges(g, pos, arrows=True) + #nx.draw_networkx_labels(g, pos) + #plt.axis('off') + #plt.savefig('graph.png') + + connections.add_command(graph) + + +def init_cli_deployment_config(): + @click.command() + @click.argument('filepath') + def deploy(filepath): + print 'Deploying from file {}'.format(filepath) + xd.deploy(filepath) + + cli.add_command(deploy) + + +if __name__ == '__main__': + init_cli_resource() + init_cli_connect() + init_cli_connections() + init_cli_deployment_config() + init_changes() + + cli() diff --git a/solar/requirements.txt b/solar/requirements.txt index 670b4eaf..8d09644b 100644 --- a/solar/requirements.txt +++ b/solar/requirements.txt @@ -10,3 +10,4 @@ mock dictdiffer==0.4.0 enum34==1.0.4 redis==2.10.3 +py.test diff --git a/solar/solar/interfaces/db/redis_db.py b/solar/solar/interfaces/db/redis_db.py index 3c43ccb7..6d432837 100644 --- a/solar/solar/interfaces/db/redis_db.py +++ b/solar/solar/interfaces/db/redis_db.py @@ -23,19 +23,22 @@ class RedisDB(object): def read(self, uid, collection=COLLECTIONS.resource): try: return json.loads( - self._r.get(self._make_key(collection, uid)) + self._r.get(self._make_key(collection.name, uid)) ) except TypeError: return None def save(self, uid, data, collection=COLLECTIONS.resource): return self._r.set( - self._make_key(collection, uid), + self._make_key(collection.name, uid), json.dumps(data) ) + def delete(self, uid, collection): + return self._r.delete(self._make_key(collection, uid)) + def get_list(self, collection=COLLECTIONS.resource): - key_glob = self._make_key(collection, '*') + key_glob = 
self._make_key(collection.name, '*') for key in self._r.keys(key_glob): yield json.loads(self._r.get(key)) diff --git a/solar/solar/operations.py b/solar/solar/operations.py index a3e753fc..cb3d3e23 100644 --- a/solar/solar/operations.py +++ b/solar/solar/operations.py @@ -48,12 +48,20 @@ def to_dict(resource, graph): 'connections': connections(resource, graph)} -def stage_changes(): - resources = resource.load_all() - conn_graph = signals.detailed_connection_graph() +def create_diff(staged, commited): + if 'connections' in commited: + commited['connections'].sort() + staged['connections'].sort() + if 'tags' in commited: + commited['tags'].sort() + staged['tags'].sort() + + return list(diff(commited, staged)) + + +def _stage_changes(staged_resources, conn_graph, + commited_resources, staged_log): - commited = state.CD() - log = state.SL() action = None try: @@ -64,17 +72,11 @@ def stage_changes(): raise for res_uid in srt: - commited_data = commited.get(res_uid, {}) - staged_data = to_dict(resources[res_uid], conn_graph) + commited_data = commited_resources.get(res_uid, {}) + staged_data = staged_resources.get(res_uid, {}) - if 'connections' in commited_data: - commited_data['connections'].sort() - staged_data['connections'].sort() - if 'tags' in commited_data: - commited_data['tags'].sort() - staged_data['tags'].sort() + df = create_diff(staged_data, commited_data) - df = list(diff(commited_data, staged_data)) if df: log_item = state.LogItem( @@ -82,8 +84,18 @@ def stage_changes(): res_uid, df, guess_action(commited_data, staged_data)) - log.add(log_item) - return log + staged_log.append(log_item) + return staged_log + + +def stage_changes(): + resources = resource.load_all() + conn_graph = signals.detailed_connection_graph() + staged = {r.name: to_dict(r, conn_graph) for r in resource.load_all().values()} + commited = state.CD() + log = state.SL() + log.delete() + return _stage_changes(staged, conn_graph, commited, log) def execute(res, action): @@ -94,10 +106,7 @@ def execute(res, action): return state.STATES.error -def commit(li, resources): - commited = state.CD() - history = state.CL() - staged = state.SL() +def commit(li, resources, commited, history): staged_res = resources[li.res] @@ -123,16 +132,19 @@ def commit(li, resources): commited[li.res] = staged_data li.state = result_state - history.add(li) + history.append(li) if result_state is state.STATES.error: raise Exception('Failed') def commit_one(): + commited = state.CD() + history = state.CL() staged = state.SL() + resources = resource.load_all() - commit(staged.popleft(), resources) + commit(staged.popleft(), resources, commited, history) def commit_changes(): @@ -143,7 +155,7 @@ def commit_changes(): resources = resource.load_all() while staged: - commit(staged.popleft(), resources) + commit(staged.popleft(), resources, commited, history) def rollback(log_item): @@ -160,12 +172,12 @@ def rollback(log_item): for e, r, mapping in staged.get('connections', ()): signals.connect(resources[e], resources[r], dict([mapping])) - df = list(diff(commited, staged)) + df = create_diff(commited, staged) log_item = state.LogItem( utils.generate_uuid(), log_item.res, df, guess_action(commited, staged)) - log.add(log_item) + log.append(log_item) res = resource.load(log_item.res) res.update(staged.get('args', {})) diff --git a/solar/solar/state.py b/solar/solar/state.py index 0f3af6c2..dc0d0111 100644 --- a/solar/solar/state.py +++ b/solar/solar/state.py @@ -83,6 +83,10 @@ class Log(object): l['diff'], l['action'], getattr(STATES, 
l['state'])) for l in items]) + def delete(self): + self.items = deque() + db.delete(self.path, db.COLLECTIONS.state_log) + def sync(self): db.save( self.path, @@ -90,7 +94,7 @@ class Log(object): collection=db.COLLECTIONS.state_log ) - def add(self, logitem): + def append(self, logitem): self.items.append(logitem) self.sync() diff --git a/solar/solar/test/test_operations.py b/solar/solar/test/test_operations.py new file mode 100644 index 00000000..1aa41e04 --- /dev/null +++ b/solar/solar/test/test_operations.py @@ -0,0 +1,107 @@ + +from pytest import fixture +import mock + +import networkx as nx + +from solar import operations +from dictdiffer import revert, patch, diff + + +@fixture +def staged(): + return {'uid': 'res.1', + 'tags': ['res', 'node.1'], + 'args': {'ip': '10.0.0.2', + 'list_val': [1, 2]}, + 'connections': [ + ['node.1', 'res.1', ['ip', 'ip']], + ['node.1', 'res.1', ['key', 'key']]] + } + +@fixture +def commited(): + return {'uid': 'res.1', + 'tags': ['res', 'node.1'], + 'args': {'ip': '10.0.0.2', + 'list_val': [1]}, + 'connections': [ + ['node.1', 'res.1', ['ip', 'ip']]] + } + +@fixture +def full_diff(staged): + return operations.create_diff(staged, {}) + + +@fixture +def diff_for_update(staged, commited): + return operations.create_diff(staged, commited) + + +def test_create_diff_with_empty_commited(full_diff): + # add will be executed + expected = [ + ('add', '', [ + ('connections', [['node.1', 'res.1', ['ip', 'ip']], + ['node.1', 'res.1', ['key', 'key']]]), + ('args', {'ip': '10.0.0.2', 'list_val': [1, 2]}), + ('uid', 'res.1'), + ('tags', ['res', 'node.1'])])] + assert full_diff == expected + + +def test_create_diff_modified(diff_for_update): + assert diff_for_update == [ + ('add', 'connections', [(1, ['node.1', 'res.1', ['key', 'key']])]), + ('add', 'args.list_val', [(1, 2)])] + + +def test_verify_patch_creates_expected(staged, diff_for_update, commited): + expected = patch(diff_for_update, commited) + assert expected == staged + + +def test_revert_update(staged, diff_for_update, commited): + expected = revert(diff_for_update, staged) + assert expected == commited + + +@fixture +def resources(): + r = {'n.1': + {'uid': 'n.1', + 'args': {'ip': '10.20.0.2'}, + 'connections': [], + 'tags': []}, + 'r.1': + {'uid': 'r.1', + 'args': {'ip': '10.20.0.2'}, + 'connections': [['n.1', 'r.1', ['ip', 'ip']]], + 'tags': []}, + 'h.1': + {'uid': 'h.1', + 'args': {'ip': '10.20.0.2', + 'ips': ['10.20.0.2']}, + 'connections': [['n.1', 'h.1', ['ip', 'ip']]], + 'tags': []}} + return r + +@fixture +def conn_graph(): + edges = [ + ('n.1', 'r.1', {'label': 'ip:ip'}), + ('n.1', 'h.1', {'label': 'ip:ip'}), + ('r.1', 'h.1', {'label': 'ip:ips'}) + ] + mdg = nx.MultiDiGraph() + mdg.add_edges_from(edges) + return mdg + + +def test_stage_changes(resources, conn_graph): + commited = {} + log = operations._stage_changes(resources, conn_graph, commited, []) + + assert len(log) == 3 + assert [l.res for l in log] == ['n.1', 'r.1', 'h.1'] From 1fa4ae84e67ceaccc1f613bd0aa2acb2fab0fc5b Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 9 Jun 2015 16:01:31 +0300 Subject: [PATCH 2/8] Remove file based databases --- solar/solar/interfaces/db/__init__.py | 5 +- .../interfaces/db/cached_file_system_db.py | 110 ------- solar/solar/interfaces/db/file_system_db.py | 70 ----- solar/solar/third_party/__init__.py | 0 solar/solar/third_party/dir_dbm.py | 297 ------------------ 5 files changed, 1 insertion(+), 481 deletions(-) delete mode 100644 solar/solar/interfaces/db/cached_file_system_db.py delete mode 100644 
solar/solar/interfaces/db/file_system_db.py delete mode 100644 solar/solar/third_party/__init__.py delete mode 100644 solar/solar/third_party/dir_dbm.py diff --git a/solar/solar/interfaces/db/__init__.py b/solar/solar/interfaces/db/__init__.py index 92f519a6..edcdab4b 100644 --- a/solar/solar/interfaces/db/__init__.py +++ b/solar/solar/interfaces/db/__init__.py @@ -1,10 +1,7 @@ -from solar.interfaces.db.cached_file_system_db import CachedFileSystemDB -from solar.interfaces.db.file_system_db import FileSystemDB + from solar.interfaces.db.redis_db import RedisDB mapping = { - 'cached_file_system': CachedFileSystemDB, - 'file_system': FileSystemDB, 'redis_db': RedisDB, } diff --git a/solar/solar/interfaces/db/cached_file_system_db.py b/solar/solar/interfaces/db/cached_file_system_db.py deleted file mode 100644 index d5b8b06c..00000000 --- a/solar/solar/interfaces/db/cached_file_system_db.py +++ /dev/null @@ -1,110 +0,0 @@ -from solar.third_party.dir_dbm import DirDBM - -import atexit -import os -import types -import yaml - -from solar import utils -from solar import errors - - -class CachedFileSystemDB(DirDBM): - STORAGE_PATH = utils.read_config()['file-system-db']['storage-path'] - RESOURCE_COLLECTION_NAME = 'resource' - - _CACHE = {} - - def __init__(self): - utils.create_dir(self.STORAGE_PATH) - super(CachedFileSystemDB, self).__init__(self.STORAGE_PATH) - self.entities = {} - - atexit.register(self.flush) - - def __setitem__(self, k, v): - """ - C{dirdbm[k] = v} - Create or modify a textfile in this directory - @type k: strings @param k: key to setitem - @type v: strings @param v: value to associate with C{k} - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - # NOTE: Can be not a string if _writeFile in the child is redefined - # assert type(v) == types.StringType, "DirDBM value must be a string" - k = self._encode(k) - - # we create a new file with extension .new, write the data to it, and - # if the write succeeds delete the old file and rename the new one. 
- old = os.path.join(self.dname, k) - if os.path.exists(old): - new = old + ".rpl" # replacement entry - else: - new = old + ".new" # new entry - try: - self._writeFile(old, v) - except: - raise - - def get_resource(self, uid): - return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] - - def get_obj_resource(self, uid): - from solar.core.resource import wrap_resource - raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] - - return wrap_resource(raw_resource) - - def add_resource(self, uid, resource): - self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource - - def store(self, collection, obj): - if 'id' in obj: - self[self._make_key(collection, obj['id'])] = obj - else: - raise errors.CannotFindID('Cannot find id for object {0}'.format(obj)) - - def store_list(self, collection, objs): - for obj in objs: - self.store(collection, obj) - - def get_list(self, collection): - collection_keys = filter( - lambda k: k.startswith('{0}-'.format(collection)), - self.keys()) - - return map(lambda k: self[k], collection_keys) - - def get_record(self, collection, _id): - key = self._make_key(collection, _id) - if key not in self: - return None - - return self[key] - - def _make_key(self, collection, _id): - return '{0}-{1}'.format(collection, _id) - - def _readFile(self, path): - if path not in self._CACHE: - data = yaml.load(super(CachedFileSystemDB, self)._readFile(path)) - self._CACHE[path] = data - return data - - return self._CACHE[path] - - def _writeFile(self, path, data): - self._CACHE[path] = data - - def _encode(self, key): - """Override method of the parent not to use base64 as a key for encoding""" - return key - - def _decode(self, key): - """Override method of the parent not to use base64 as a key for encoding""" - return key - - def flush(self): - print 'FLUSHING DB' - for path, data in self._CACHE.items(): - super(CachedFileSystemDB, self)._writeFile(path, yaml.dump(data)) diff --git a/solar/solar/interfaces/db/file_system_db.py b/solar/solar/interfaces/db/file_system_db.py deleted file mode 100644 index 4c3c733e..00000000 --- a/solar/solar/interfaces/db/file_system_db.py +++ /dev/null @@ -1,70 +0,0 @@ -from solar.third_party.dir_dbm import DirDBM - -import yaml - -from solar import utils -from solar import errors - - -class FileSystemDB(DirDBM): - STORAGE_PATH = utils.read_config()['file-system-db']['storage-path'] - RESOURCE_COLLECTION_NAME = 'resource' - - def __init__(self): - utils.create_dir(self.STORAGE_PATH) - super(FileSystemDB, self).__init__(self.STORAGE_PATH) - self.entities = {} - - def get_resource(self, uid): - return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] - - def get_obj_resource(self, uid): - if not uid in self.entities: - from solar.core.resource import wrap_resource - raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] - self.entities[uid] = wrap_resource(raw_resource) - return self.entities[uid] - - def add_resource(self, uid, resource): - self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource - - def store(self, collection, obj): - if 'id' in obj: - self[self._make_key(collection, obj['id'])] = obj - else: - raise errors.CannotFindID('Cannot find id for object {0}'.format(obj)) - - def store_list(self, collection, objs): - for obj in objs: - self.store(collection, obj) - - def get_list(self, collection): - collection_keys = filter( - lambda k: k.startswith('{0}-'.format(collection)), - self.keys()) - - return map(lambda k: self[k], collection_keys) - - def get_record(self, 
collection, _id):
-        key = self._make_key(collection, _id)
-        if key not in self:
-            return None
-
-        return self[key]
-
-    def _make_key(self, collection, _id):
-        return '{0}-{1}'.format(collection, _id)
-
-    def _readFile(self, path):
-        return yaml.load(super(FileSystemDB, self)._readFile(path))
-
-    def _writeFile(self, path, data):
-        return super(FileSystemDB, self)._writeFile(path, utils.yaml_dump(data))
-
-    def _encode(self, key):
-        """Override method of the parent not to use base64 as a key for encoding"""
-        return key
-
-    def _decode(self, key):
-        """Override method of the parent not to use base64 as a key for encoding"""
-        return key
diff --git a/solar/solar/third_party/__init__.py b/solar/solar/third_party/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/solar/solar/third_party/dir_dbm.py b/solar/solar/third_party/dir_dbm.py
deleted file mode 100644
index cf7a4624..00000000
--- a/solar/solar/third_party/dir_dbm.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# -*- test-case-name: twisted.test.test_dirdbm -*-
-#
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-
-"""
-DBM-style interface to a directory.
-Each key is stored as a single file. This is not expected to be very fast or
-efficient, but it's good for easy debugging.
-DirDBMs are *not* thread-safe, they should only be accessed by one thread at a time.
-No files should be placed in the working directory of a DirDBM save those
-created by the DirDBM itself!
-Maintainer: Itamar Shtull-Trauring
-"""
-
-
-import os
-import types
-import base64
-import glob
-
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-try:
-    _open
-except NameError:
-    _open = open
-
-
-class DirDBM(object):
-    """A directory with a DBM interface.
-
-    This class presents a hash-like interface to a directory of small,
-    flat files. It can only use strings as keys or values.
-    """
-
-    def __init__(self, name):
-        """
-        @type name: strings
-        @param name: Base path to use for the directory storage.
-        """
-        self.dname = os.path.abspath(name)
-        if not os.path.isdir(self.dname):
-            os.mkdir(self.dname)
-        else:
-            # Run recovery, in case we crashed. we delete all files ending
-            # with ".new". Then we find all files who end with ".rpl". If a
-            # corresponding file exists without ".rpl", we assume the write
-            # failed and delete the ".rpl" file. If only a ".rpl" exist we
-            # assume the program crashed right after deleting the old entry
-            # but before renaming the replacement entry.
-            #
-            # NOTE: '.' is NOT in the base64 alphabet!
-            for f in glob.glob(os.path.join(self.dname, "*.new")):
-                os.remove(f)
-            replacements = glob.glob(os.path.join(self.dname, "*.rpl"))
-            for f in replacements:
-                old = f[:-4]
-                if os.path.exists(old):
-                    os.remove(f)
-                else:
-                    os.rename(f, old)
-
-    def _encode(self, k):
-        """Encode a key so it can be used as a filename.
-        """
-        # NOTE: '_' is NOT in the base64 alphabet!
-        return base64.encodestring(k).replace('\n', '_').replace("/", "-")
-
-    def _decode(self, k):
-        """Decode a filename to get the key.
-        """
-        return base64.decodestring(k.replace('_', '\n').replace("-", "/"))
-
-    def _readFile(self, path):
-        """Read in the contents of a file.
-
-        Override in subclasses to e.g. provide transparently encrypted dirdbm.
-        """
-        f = _open(path, "rb")
-        s = f.read()
-        f.close()
-        return s
-
-    def _writeFile(self, path, data):
-        """Write data to a file.
-
-        Override in subclasses to e.g. provide transparently encrypted dirdbm.
-        """
-        f = _open(path, "wb")
-        f.write(data)
-        f.flush()
-        f.close()
-
-    def __len__(self):
-        """
-        @return: The number of key/value pairs in this Shelf
-        """
-        return len(os.listdir(self.dname))
-
-    def __setitem__(self, k, v):
-        """
-        C{dirdbm[k] = v}
-        Create or modify a textfile in this directory
-        @type k: strings @param k: key to setitem
-        @type v: strings @param v: value to associate with C{k}
-        """
-        assert type(k) == types.StringType, "DirDBM key must be a string"
-        # NOTE: Can be not a string if _writeFile in the child is redefined
-        # assert type(v) == types.StringType, "DirDBM value must be a string"
-        k = self._encode(k)
-
-        # we create a new file with extension .new, write the data to it, and
-        # if the write succeeds delete the old file and rename the new one.
-        old = os.path.join(self.dname, k)
-        if os.path.exists(old):
-            new = old + ".rpl" # replacement entry
-        else:
-            new = old + ".new" # new entry
-        try:
-            self._writeFile(new, v)
-        except:
-            os.remove(new)
-            raise
-        else:
-            if os.path.exists(old): os.remove(old)
-            os.rename(new, old)
-
-    def __getitem__(self, k):
-        """
-        C{dirdbm[k]}
-        Get the contents of a file in this directory as a string.
-
-        @type k: string @param k: key to lookup
-
-        @return: The value associated with C{k}
-        @raise KeyError: Raised when there is no such key
-        """
-        assert type(k) == types.StringType, "DirDBM key must be a string"
-        path = os.path.join(self.dname, self._encode(k))
-        try:
-            return self._readFile(path)
-        except Exception as exc:
-            raise KeyError, k
-
-    def __delitem__(self, k):
-        """
-        C{del dirdbm[foo]}
-        Delete a file in this directory.
-
-        @type k: string
-        @param k: key to delete
-
-        @raise KeyError: Raised when there is no such key
-        """
-        assert type(k) == types.StringType, "DirDBM key must be a string"
-        k = self._encode(k)
-        try: os.remove(os.path.join(self.dname, k))
-        except (OSError, IOError): raise KeyError(self._decode(k))
-
-    def keys(self):
-        """
-        @return: a C{list} of filenames (keys).
-        """
-        return map(self._decode, os.listdir(self.dname))
-
-    def values(self):
-        """
-        @return: a C{list} of file-contents (values).
-        """
-        vals = []
-        keys = self.keys()
-        for key in keys:
-            vals.append(self[key])
-        return vals
-
-    def items(self):
-        """
-        @return: a C{list} of 2-tuples containing key/value pairs.
-        """
-        items = []
-        keys = self.keys()
-        for key in keys:
-            items.append((key, self[key]))
-        return items
-
-    def has_key(self, key):
-        """
-        @type key: string
-        @param key: The key to test
-
-        @return: A true value if this dirdbm has the specified key, a false
-        value otherwise.
-        """
-        assert type(key) == types.StringType, "DirDBM key must be a string"
-        key = self._encode(key)
-        return os.path.isfile(os.path.join(self.dname, key))
-
-    def setdefault(self, key, value):
-        """
-        @type key: string
-        @param key: The key to lookup
-
-        @param value: The value to associate with key if key is not already
-        associated with a value.
-        """
-        if not self.has_key(key):
-            self[key] = value
-            return value
-        return self[key]
-
-    def get(self, key, default = None):
-        """
-        @type key: string
-        @param key: The key to lookup
-
-        @param default: The value to return if the given key does not exist
-        @return: The value associated with C{key} or C{default} if not
-        C{self.has_key(key)}
-        """
-        if self.has_key(key):
-            return self[key]
-        else:
-            return default
-
-    def __contains__(self, key):
-        """
-        C{key in dirdbm}
-        @type key: string
-        @param key: The key to test
-
-        @return: A true value if C{self.has_key(key)}, a false value otherwise.
-        """
-        assert type(key) == types.StringType, "DirDBM key must be a string"
-        key = self._encode(key)
-        return os.path.isfile(os.path.join(self.dname, key))
-
-    def update(self, dict):
-        """
-        Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting
-        keys will be overwritten with the values from C{dict}.
-        @type dict: mapping
-        @param dict: A mapping of key/value pairs to add to this dirdbm.
-        """
-        for key, val in dict.items():
-            self[key]=val
-
-    def copyTo(self, path):
-        """
-        Copy the contents of this dirdbm to the dirdbm at C{path}.
-
-        @type path: C{str}
-        @param path: The path of the dirdbm to copy to. If a dirdbm
-        exists at the destination path, it is cleared first.
-
-        @rtype: C{DirDBM}
-        @return: The dirdbm this dirdbm was copied to.
-        """
-        path = os.path.abspath(path)
-        assert path != self.dname
-
-        d = self.__class__(path)
-        d.clear()
-        for k in self.keys():
-            d[k] = self[k]
-        return d
-
-    def clear(self):
-        """
-        Delete all key/value pairs in this dirdbm.
-        """
-        for k in self.keys():
-            del self[k]
-
-    def close(self):
-        """
-        Close this dbm: no-op, for dbm-style interface compliance.
-        """
-
-    def getModificationTime(self, key):
-        """
-        Returns modification time of an entry.
- - @return: Last modification date (seconds since epoch) of entry C{key} - @raise KeyError: Raised when there is no such keyerror - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(key)) - if os.path.isfile(path): - return os.path.getmtime(path) - else: - raise KeyError, key From 1f72b1d6b26136abb1d060ac4abb9e0317aac66e Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 9 Jun 2015 17:16:26 +0300 Subject: [PATCH 3/8] Use full resource representation in staged data --- solar/solar/core/resource.py | 2 +- solar/solar/operations.py | 14 ++++-------- solar/solar/test/test_operations.py | 34 +++++++++++++++-------------- 3 files changed, 23 insertions(+), 27 deletions(-) diff --git a/solar/solar/core/resource.py b/solar/solar/core/resource.py index b3e7babf..5e2363e0 100644 --- a/solar/solar/core/resource.py +++ b/solar/solar/core/resource.py @@ -24,7 +24,7 @@ class Resource(object): def __init__(self, name, metadata, args, tags=None): self.name = name self.metadata = metadata - self.actions = metadata['actions'].keys() if metadata['actions'] else None + self.actions = metadata.get('actions', {}).keys() or None self.args = {} for arg_name, arg_value in args.items(): diff --git a/solar/solar/operations.py b/solar/solar/operations.py index cb3d3e23..81c9f355 100644 --- a/solar/solar/operations.py +++ b/solar/solar/operations.py @@ -42,10 +42,9 @@ def connections(res, graph): def to_dict(resource, graph): - return {'uid': resource.name, - 'tags': resource.tags, - 'args': resource.args_dict(), - 'connections': connections(resource, graph)} + res = resource.to_dict() + res['connections'] = connections(resource, graph) + return res def create_diff(staged, commited): @@ -109,16 +108,11 @@ def execute(res, action): def commit(li, resources, commited, history): staged_res = resources[li.res] - staged_data = patch(li.diff, commited.get(li.res, {})) # TODO(dshulyak) think about this hack for update if li.action == 'update': - commited_res = resource.Resource( - staged_res.name, - staged_res.metadata, - commited[li.res]['args'], - commited[li.res]['tags']) + commited_res = resource.wrap_resource(commited.get(li.res, {})) result_state = execute(commited_res, 'remove') if result_state is state.STATES.success: diff --git a/solar/solar/test/test_operations.py b/solar/solar/test/test_operations.py index 1aa41e04..8b06d9ee 100644 --- a/solar/solar/test/test_operations.py +++ b/solar/solar/test/test_operations.py @@ -1,19 +1,20 @@ from pytest import fixture import mock - +from dictdiffer import revert, patch, diff import networkx as nx from solar import operations -from dictdiffer import revert, patch, diff +from solar.core.resource import wrap_resource @fixture def staged(): - return {'uid': 'res.1', + return {'id': 'res.1', 'tags': ['res', 'node.1'], - 'args': {'ip': '10.0.0.2', - 'list_val': [1, 2]}, + 'input': {'ip': {'value': '10.0.0.2'}, + 'list_val': {'value': [1, 2]}}, + 'metadata': {}, 'connections': [ ['node.1', 'res.1', ['ip', 'ip']], ['node.1', 'res.1', ['key', 'key']]] @@ -21,10 +22,11 @@ def staged(): @fixture def commited(): - return {'uid': 'res.1', + return {'id': 'res.1', 'tags': ['res', 'node.1'], - 'args': {'ip': '10.0.0.2', + 'input': {'ip': '10.0.0.2', 'list_val': [1]}, + 'metadata': {}, 'connections': [ ['node.1', 'res.1', ['ip', 'ip']]] } @@ -41,20 +43,16 @@ def diff_for_update(staged, commited): def test_create_diff_with_empty_commited(full_diff): # add will be executed - expected = [ - ('add', '', [ - 
('connections', [['node.1', 'res.1', ['ip', 'ip']], - ['node.1', 'res.1', ['key', 'key']]]), - ('args', {'ip': '10.0.0.2', 'list_val': [1, 2]}), - ('uid', 'res.1'), - ('tags', ['res', 'node.1'])])] + expected = [('add', '', [('connections', [['node.1', 'res.1', ['ip', 'ip']], ['node.1', 'res.1', ['key', 'key']]]), ('input', {'ip': {'value': '10.0.0.2'}, 'list_val': {'value': [1, 2]}}), ('metadata', {}), ('id', 'res.1'), ('tags', ['res', 'node.1'])])] assert full_diff == expected def test_create_diff_modified(diff_for_update): assert diff_for_update == [ - ('add', 'connections', [(1, ['node.1', 'res.1', ['key', 'key']])]), - ('add', 'args.list_val', [(1, 2)])] + ('add', 'connections', + [(1, ['node.1', 'res.1', ['key', 'key']])]), + ('change', 'input.ip', ('10.0.0.2', {'value': '10.0.0.2'})), + ('change', 'input.list_val', ([1], {'value': [1, 2]}))] def test_verify_patch_creates_expected(staged, diff_for_update, commited): @@ -105,3 +103,7 @@ def test_stage_changes(resources, conn_graph): assert len(log) == 3 assert [l.res for l in log] == ['n.1', 'r.1', 'h.1'] + + +def test_resource_fixture(staged): + res = wrap_resource(staged) From 6b9a2e1655c6433eb15cd76276e3e3ec18089fb8 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 10 Jun 2015 10:01:34 +0300 Subject: [PATCH 4/8] Add conftest and other setup for testing --- solar/requirements.txt | 3 ++- solar/solar/core/signals.py | 4 ---- solar/solar/interfaces/db/__init__.py | 2 ++ solar/solar/interfaces/db/redis_db.py | 10 +++++++++- solar/solar/test/conftest.py | 20 ++++++++++++++++++++ solar/solar/test/test_signals.py | 4 +++- solar/solar/test/test_validation.py | 4 +++- 7 files changed, 39 insertions(+), 8 deletions(-) create mode 100644 solar/solar/test/conftest.py diff --git a/solar/requirements.txt b/solar/requirements.txt index 8d09644b..a3d48744 100644 --- a/solar/requirements.txt +++ b/solar/requirements.txt @@ -10,4 +10,5 @@ mock dictdiffer==0.4.0 enum34==1.0.4 redis==2.10.3 -py.test +pytest +fakeredis diff --git a/solar/solar/core/signals.py b/solar/solar/core/signals.py index 0b3a911d..04eca14f 100644 --- a/solar/solar/core/signals.py +++ b/solar/solar/core/signals.py @@ -110,10 +110,6 @@ class Connections(object): CLIENTS = {} - path = utils.read_config()[CLIENTS_CONFIG_KEY] - if os.path.exists(path): - os.remove(path) - @staticmethod def flush(): print 'FLUSHING Connections' diff --git a/solar/solar/interfaces/db/__init__.py b/solar/solar/interfaces/db/__init__.py index edcdab4b..306c3013 100644 --- a/solar/solar/interfaces/db/__init__.py +++ b/solar/solar/interfaces/db/__init__.py @@ -1,8 +1,10 @@ from solar.interfaces.db.redis_db import RedisDB +from solar.interfaces.db.redis_db import FakeRedisDB mapping = { 'redis_db': RedisDB, + 'fakeredis_db': FakeRedisDB } DB = None diff --git a/solar/solar/interfaces/db/redis_db.py b/solar/solar/interfaces/db/redis_db.py index 6d432837..0f0aaec0 100644 --- a/solar/solar/interfaces/db/redis_db.py +++ b/solar/solar/interfaces/db/redis_db.py @@ -1,6 +1,7 @@ from enum import Enum import json import redis +import fakeredis from solar import utils from solar import errors @@ -15,9 +16,11 @@ class RedisDB(object): 'host': 'localhost', 'port': 6379, } + REDIS_CLIENT = redis.StrictRedis + def __init__(self): - self._r = redis.StrictRedis(**self.DB) + self._r = self.REDIS_CLIENT(**self.DB) self.entities = {} def read(self, uid, collection=COLLECTIONS.resource): @@ -48,3 +51,8 @@ class RedisDB(object): def _make_key(self, collection, _id): return '{0}:{1}'.format(collection, _id) + + +class 
FakeRedisDB(RedisDB): + + REDIS_CLIENT = fakeredis.FakeStrictRedis diff --git a/solar/solar/test/conftest.py b/solar/solar/test/conftest.py new file mode 100644 index 00000000..9e53bb77 --- /dev/null +++ b/solar/solar/test/conftest.py @@ -0,0 +1,20 @@ + +from pytest import fixture + +from solar.interfaces import db + + +def pytest_configure(): + db.DB = db.mapping['fakeredis_db']() + + +@fixture(autouse=True) +def cleanup(request): + + def fin(): + from solar.core import signals + + db.get_db().clear() + signals.Connections.clear() + + request.addfinalizer(fin) diff --git a/solar/solar/test/test_signals.py b/solar/solar/test/test_signals.py index 45862fdf..ebee1942 100644 --- a/solar/solar/test/test_signals.py +++ b/solar/solar/test/test_signals.py @@ -4,7 +4,9 @@ import base from solar.core import signals as xs +from pytest import mark +@mark.xfail class TestBaseInput(base.BaseResourceTest): def test_input_dict_type(self): sample_meta_dir = self.make_resource_meta(""" @@ -173,7 +175,7 @@ input: with self.assertRaises(Exception): xs.connect(sample2, sample1) - +@mark.xfail class TestListInput(base.BaseResourceTest): def test_list_input_single(self): sample_meta_dir = self.make_resource_meta(""" diff --git a/solar/solar/test/test_validation.py b/solar/solar/test/test_validation.py index 0e5339bb..3c99f334 100644 --- a/solar/solar/test/test_validation.py +++ b/solar/solar/test/test_validation.py @@ -1,10 +1,12 @@ import unittest +from pytest import mark + from solar.test import base from solar.core import validation as sv - +@mark.xfail class TestInputValidation(base.BaseResourceTest): def test_input_str_type(self): sample_meta_dir = self.make_resource_meta(""" From 799504f14cb011e86e68ab6d59ec3a59ee77a0b8 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 10 Jun 2015 12:07:52 +0300 Subject: [PATCH 5/8] Add test for rolling back one attribute --- solar/solar/core/resource.py | 4 +- solar/solar/operations.py | 6 +- solar/solar/state.py | 3 + solar/solar/test/conftest.py | 17 ++++++ ..._operations.py => test_diff_generation.py} | 0 .../solar/test/test_stage_commit_procedure.py | 59 +++++++++++++++++++ 6 files changed, 84 insertions(+), 5 deletions(-) rename solar/solar/test/{test_operations.py => test_diff_generation.py} (100%) create mode 100644 solar/solar/test/test_stage_commit_procedure.py diff --git a/solar/solar/core/resource.py b/solar/solar/core/resource.py index 5e2363e0..8342a3a4 100644 --- a/solar/solar/core/resource.py +++ b/solar/solar/core/resource.py @@ -63,9 +63,9 @@ class Resource(object): def to_dict(self): return { - 'name': self.name, + 'id': self.name, 'metadata': self.metadata, - 'args': self.args_show(), + 'input': self.args_show(), 'tags': self.tags, } diff --git a/solar/solar/operations.py b/solar/solar/operations.py index 81c9f355..7d04f267 100644 --- a/solar/solar/operations.py +++ b/solar/solar/operations.py @@ -166,7 +166,7 @@ def rollback(log_item): for e, r, mapping in staged.get('connections', ()): signals.connect(resources[e], resources[r], dict([mapping])) - df = create_diff(commited, staged) + df = create_diff(staged, commited) log_item = state.LogItem( utils.generate_uuid(), @@ -174,10 +174,10 @@ def rollback(log_item): log.append(log_item) res = resource.load(log_item.res) - res.update(staged.get('args', {})) + res.update(staged.get('input', {})) res.save() - return log + return log_item def rollback_uid(uid): diff --git a/solar/solar/state.py b/solar/solar/state.py index dc0d0111..d6c6d7c9 100644 --- a/solar/solar/state.py +++ b/solar/solar/state.py 
@@ -112,6 +112,9 @@ class Log(object): return ['L(uuid={0}, res={1}, action={2})'.format( l.uid, l.res, l.action) for l in self.items] + def __len__(self): + return len(self.items) + def __repr__(self): return 'Log({0})'.format(self.path) diff --git a/solar/solar/test/conftest.py b/solar/solar/test/conftest.py index 9e53bb77..8cf06c76 100644 --- a/solar/solar/test/conftest.py +++ b/solar/solar/test/conftest.py @@ -1,7 +1,10 @@ +import os + from pytest import fixture from solar.interfaces import db +from solar import utils def pytest_configure(): @@ -18,3 +21,17 @@ def cleanup(request): signals.Connections.clear() request.addfinalizer(fin) + + + +@fixture +def default_resources(): + from solar.core import signals + from solar.core import resource + node1 = resource.create('node1', '/vagrant/resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'}) + + rabbitmq_service1 = resource.create('rabbitmq', '/vagrant/resources/rabbitmq_service/', {'ssh_user': '', 'ip': '','management_port': '15672', 'port': '5672', 'ssh_key': '', 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'}) + openstack_vhost = resource.create('vhost', '/vagrant/resources/rabbitmq_vhost/', {'ssh_user': '', 'ip': '', 'ssh_key': '', 'vhost_name': 'openstack', 'container_name': ''}) + signals.connect(node1, rabbitmq_service1) + signals.connect(rabbitmq_service1, openstack_vhost) + return resource.load_all() diff --git a/solar/solar/test/test_operations.py b/solar/solar/test/test_diff_generation.py similarity index 100% rename from solar/solar/test/test_operations.py rename to solar/solar/test/test_diff_generation.py diff --git a/solar/solar/test/test_stage_commit_procedure.py b/solar/solar/test/test_stage_commit_procedure.py new file mode 100644 index 00000000..bce39fc9 --- /dev/null +++ b/solar/solar/test/test_stage_commit_procedure.py @@ -0,0 +1,59 @@ + +import pytest +from mock import patch + +from solar.core import resource +from solar import operations +from solar import state + + +@patch('solar.core.actions.resource_action') +@pytest.mark.usefixtures("default_resources") +def test_changes_on_update_image(maction): + log = operations.stage_changes() + + assert len(log) == 3 + + operations.commit_changes() + + rabbitmq = resource.load('rabbitmq') + rabbitmq.update({'image': 'different'}) + log = operations.stage_changes() + + assert len(log) == 1 + + item = log.items[0] + + assert item.diff == [ + ('change', u'input.image.value', + (u'rabbitmq:3-management', u'different')), + ('change', u'metadata.input.image.value', + (u'rabbitmq:3-management', u'different'))] + + assert item.action == 'update' + + operations.commit_changes() + + commited = state.CD() + + assert commited['rabbitmq']['input']['image'] == { + u'emitter': None, u'value': u'different'} + + reverse = operations.rollback(state.CL().items[-1]) + + assert reverse.diff == [ + ('change', u'input.image.value', + (u'different', u'rabbitmq:3-management')), + ('change', u'metadata.input.image.value', + (u'different', u'rabbitmq:3-management'))] + + operations.commit_changes() + + commited = state.CD() + + assert commited['rabbitmq']['input']['image'] == { + u'emitter': None, u'value': u'rabbitmq:3-management'} + + + + From 857182a7ba4339213953f61381a863c8d656b2de Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 10 Jun 2015 12:59:03 +0300 Subject: [PATCH 6/8] Remove xfail from tests --- cli.py | 246 ---------------------------- solar/solar/test/test_signals.py | 4 
+- solar/solar/test/test_validation.py | 2 +- 3 files changed, 3 insertions(+), 249 deletions(-) delete mode 100755 cli.py diff --git a/cli.py b/cli.py deleted file mode 100755 index 52509a4b..00000000 --- a/cli.py +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/env python -import click -import json -#import matplotlib -#matplotlib.use('Agg') # don't show windows -#import matplotlib.pyplot as plt -import networkx as nx -import os -import subprocess - -from solar.core import actions as xa -from solar.core import resource as xr -from solar.core import signals as xs -from solar import operations -from solar import state - -from solar.interfaces.db import get_db - -db = get_db() - - -@click.group() -def cli(): - pass - - -def init_cli_resource(): - @click.group() - def resource(): - pass - - cli.add_command(resource) - - @click.command() - @click.argument('resource_path') - @click.argument('action_name') - def action(action_name, resource_path): - print 'action', resource_path, action_name - r = xr.load(resource_path) - xa.resource_action(r, action_name) - - resource.add_command(action) - - @click.command() - @click.argument('name') - @click.argument('base_path') - @click.argument('dest_path') - @click.argument('args') - def create(args, dest_path, base_path, name): - print 'create', name, base_path, dest_path, args - args = json.loads(args) - xr.create(name, base_path, dest_path, args) - - resource.add_command(create) - - @click.command() - @click.argument('resource_path') - @click.argument('tag_name') - @click.option('--add/--delete', default=True) - def tag(add, tag_name, resource_path): - print 'Tag', resource_path, tag_name, add - r = xr.load(resource_path) - if add: - r.add_tag(tag_name) - else: - r.remove_tag(tag_name) - r.save() - - resource.add_command(tag) - - @click.command() - @click.argument('path') - @click.option('--all/--one', default=False) - @click.option('--tag', default=None) - @click.option('--use-json/--no-use-json', default=False) - def show(use_json, tag, all, path): - import json - import six - - printer = lambda r: six.print_(r) - if use_json: - printer = lambda r: six.print_(json.dumps(r.to_dict())) - - if all or tag: - for name, resource in xr.load_all(path).items(): - show = True - if tag: - if tag not in resource.tags: - show = False - - if show: - printer(resource) - else: - printer(xr.load(path)) - - resource.add_command(show) - - @click.command() - @click.argument('name') - @click.argument('args') - def update(name, args): - args = json.loads(args) - all = xr.load_all() - r = all[name] - r.update(args) - resource.add_command(update) - - -def init_cli_connect(): - @click.command() - @click.argument('emitter') - @click.argument('receiver') - @click.option('--mapping', default=None) - def connect(mapping, receiver, emitter): - print 'Connect', emitter, receiver - emitter = xr.load(emitter) - receiver = xr.load(receiver) - print emitter - print receiver - if mapping is not None: - mapping = json.loads(mapping) - xs.connect(emitter, receiver, mapping=mapping) - - cli.add_command(connect) - - @click.command() - @click.argument('emitter') - @click.argument('receiver') - def disconnect(receiver, emitter): - print 'Disconnect', emitter, receiver - emitter = xr.load(emitter) - receiver = xr.load(receiver) - print emitter - print receiver - xs.disconnect(emitter, receiver) - - cli.add_command(disconnect) - - -def init_changes(): - @click.group() - def changes(): - pass - - cli.add_command(changes) - - @click.command() - def stage(): - log = operations.stage_changes() - print 
log.show() - - changes.add_command(stage) - - @click.command() - @click.option('--one', is_flag=True, default=False) - def commit(one): - if one: - operations.commit_one() - else: - operations.commit_changes() - - changes.add_command(commit) - - @click.command() - @click.option('--limit', default=5) - def history(limit): - print state.CL().show() - - changes.add_command(history) - - @click.command() - @click.option('--last', is_flag=True, default=False) - @click.option('--all', is_flag=True, default=False) - @click.option('--uid', default=None) - def rollback(last, all, uid): - if last: - print operations.rollback_last() - elif all: - print operations.rollback_all() - elif uid: - print operations.rollback_uid(uid) - - changes.add_command(rollback) - - @click.command() - @click.argument('uid') - def staged(uid): - for item in state.SL(): - if item.uid == uid: - print item - return - - changes.add_command(staged) - - -def init_cli_connections(): - @click.group() - def connections(): - pass - - cli.add_command(connections) - - @click.command() - def show(): - print json.dumps(xs.CLIENTS, indent=2) - - connections.add_command(show) - - # TODO: this requires graphing libraries - @click.command() - def graph(): - #g = xs.connection_graph() - g = xs.detailed_connection_graph() - - nx.write_dot(g, 'graph.dot') - subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) - - # Matplotlib - #pos = nx.spring_layout(g) - #nx.draw_networkx_nodes(g, pos) - #nx.draw_networkx_edges(g, pos, arrows=True) - #nx.draw_networkx_labels(g, pos) - #plt.axis('off') - #plt.savefig('graph.png') - - connections.add_command(graph) - - -def init_cli_deployment_config(): - @click.command() - @click.argument('filepath') - def deploy(filepath): - print 'Deploying from file {}'.format(filepath) - xd.deploy(filepath) - - cli.add_command(deploy) - - -if __name__ == '__main__': - init_cli_resource() - init_cli_connect() - init_cli_connections() - init_cli_deployment_config() - init_changes() - - cli() diff --git a/solar/solar/test/test_signals.py b/solar/solar/test/test_signals.py index ebee1942..fa7bda60 100644 --- a/solar/solar/test/test_signals.py +++ b/solar/solar/test/test_signals.py @@ -6,7 +6,7 @@ from solar.core import signals as xs from pytest import mark -@mark.xfail + class TestBaseInput(base.BaseResourceTest): def test_input_dict_type(self): sample_meta_dir = self.make_resource_meta(""" @@ -175,7 +175,7 @@ input: with self.assertRaises(Exception): xs.connect(sample2, sample1) -@mark.xfail + class TestListInput(base.BaseResourceTest): def test_list_input_single(self): sample_meta_dir = self.make_resource_meta(""" diff --git a/solar/solar/test/test_validation.py b/solar/solar/test/test_validation.py index 3c99f334..3480b460 100644 --- a/solar/solar/test/test_validation.py +++ b/solar/solar/test/test_validation.py @@ -6,7 +6,7 @@ from solar.test import base from solar.core import validation as sv -@mark.xfail + class TestInputValidation(base.BaseResourceTest): def test_input_str_type(self): sample_meta_dir = self.make_resource_meta(""" From 7eb941330d149cfdd7f4cae9445e24a4ed1b41bb Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 10 Jun 2015 16:38:46 +0300 Subject: [PATCH 7/8] Added tests for propagated data and connections --- solar/solar/operations.py | 6 +- solar/solar/test/conftest.py | 13 -- solar/solar/test/test_signals.py | 2 - .../solar/test/test_stage_commit_procedure.py | 13 ++ .../solar/test/test_update_propagated_data.py | 114 ++++++++++++++++++ solar/solar/test/test_validation.py | 2 - 6 
files changed, 131 insertions(+), 19 deletions(-) create mode 100644 solar/solar/test/test_update_propagated_data.py diff --git a/solar/solar/operations.py b/solar/solar/operations.py index 7d04f267..d9f00cec 100644 --- a/solar/solar/operations.py +++ b/solar/solar/operations.py @@ -98,6 +98,7 @@ def stage_changes(): def execute(res, action): + return state.STATES.success try: actions.resource_action(res, action) return state.STATES.success @@ -112,7 +113,8 @@ def commit(li, resources, commited, history): # TODO(dshulyak) think about this hack for update if li.action == 'update': - commited_res = resource.wrap_resource(commited.get(li.res, {})) + commited_res = resource.wrap_resource( + commited[li.res]['metadata']) result_state = execute(commited_res, 'remove') if result_state is state.STATES.success: @@ -174,7 +176,7 @@ def rollback(log_item): log.append(log_item) res = resource.load(log_item.res) - res.update(staged.get('input', {})) + res.set_args(staged['input']) res.save() return log_item diff --git a/solar/solar/test/conftest.py b/solar/solar/test/conftest.py index 8cf06c76..10845b22 100644 --- a/solar/solar/test/conftest.py +++ b/solar/solar/test/conftest.py @@ -22,16 +22,3 @@ def cleanup(request): request.addfinalizer(fin) - - -@fixture -def default_resources(): - from solar.core import signals - from solar.core import resource - node1 = resource.create('node1', '/vagrant/resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'}) - - rabbitmq_service1 = resource.create('rabbitmq', '/vagrant/resources/rabbitmq_service/', {'ssh_user': '', 'ip': '','management_port': '15672', 'port': '5672', 'ssh_key': '', 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'}) - openstack_vhost = resource.create('vhost', '/vagrant/resources/rabbitmq_vhost/', {'ssh_user': '', 'ip': '', 'ssh_key': '', 'vhost_name': 'openstack', 'container_name': ''}) - signals.connect(node1, rabbitmq_service1) - signals.connect(rabbitmq_service1, openstack_vhost) - return resource.load_all() diff --git a/solar/solar/test/test_signals.py b/solar/solar/test/test_signals.py index fa7bda60..45862fdf 100644 --- a/solar/solar/test/test_signals.py +++ b/solar/solar/test/test_signals.py @@ -4,8 +4,6 @@ import base from solar.core import signals as xs -from pytest import mark - class TestBaseInput(base.BaseResourceTest): def test_input_dict_type(self): diff --git a/solar/solar/test/test_stage_commit_procedure.py b/solar/solar/test/test_stage_commit_procedure.py index bce39fc9..86424a57 100644 --- a/solar/solar/test/test_stage_commit_procedure.py +++ b/solar/solar/test/test_stage_commit_procedure.py @@ -7,6 +7,19 @@ from solar import operations from solar import state +@pytest.fixture +def default_resources(): + from solar.core import signals + from solar.core import resource + node1 = resource.create('node1', '/vagrant/resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'}) + + rabbitmq_service1 = resource.create('rabbitmq', '/vagrant/resources/rabbitmq_service/', {'ssh_user': '', 'ip': '','management_port': '15672', 'port': '5672', 'ssh_key': '', 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'}) + openstack_vhost = resource.create('vhost', '/vagrant/resources/rabbitmq_vhost/', {'ssh_user': '', 'ip': '', 'ssh_key': '', 'vhost_name': 'openstack', 'container_name': ''}) + signals.connect(node1, rabbitmq_service1) + 
signals.connect(rabbitmq_service1, openstack_vhost) + return resource.load_all() + + @patch('solar.core.actions.resource_action') @pytest.mark.usefixtures("default_resources") def test_changes_on_update_image(maction): diff --git a/solar/solar/test/test_update_propagated_data.py b/solar/solar/test/test_update_propagated_data.py new file mode 100644 index 00000000..a8fa7153 --- /dev/null +++ b/solar/solar/test/test_update_propagated_data.py @@ -0,0 +1,114 @@ +import pytest +from mock import patch + +from solar.core import signals +from solar.core import resource +from solar import operations +from solar import state + +@pytest.fixture +def resources(): + + node1 = resource.create('node1', '/vagrant/resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/', 'ssh_user': 'vagrant'}) + + mariadb_service1 = resource.create('mariadb', '/vagrant/resources/mariadb_service', {'image': 'mariadb', 'root_password': 'mariadb', 'port': 3306, 'ip': '', 'ssh_user': '', 'ssh_key': ''}) + keystone_db = resource.create('keystone_db', '/vagrant/resources/mariadb_keystone_db/', {'db_name': 'keystone_db', 'login_password': '', 'login_user': 'root', 'login_port': '', 'ip': '', 'ssh_user': '', 'ssh_key': ''}) + + signals.connect(node1, mariadb_service1) + signals.connect(node1, keystone_db) + signals.connect(mariadb_service1, keystone_db, {'root_password': 'login_password', 'port': 'login_port'}) + return resource.load_all() + + +@patch('solar.core.actions.resource_action') +def test_update_port_on_mariadb(maction, resources): + operations.stage_changes() + operations.commit_changes() + + mariadb = resources['mariadb'] + + mariadb.update({'port': 4400}) + + log = operations.stage_changes() + + assert len(log) == 2 + + mariadb_log = log.items[0] + + assert mariadb_log.diff == [ + ('change', u'input.port.value', (3306, 4400)), + ('change', u'metadata.input.port.value', (3306, 4400))] + + keystone_db_log = log.items[1] + + assert keystone_db_log.diff == [ + ('change', u'input.login_port.value', (3306, 4400)), + ('change', u'metadata.input.login_port.value', (3306, 4400))] + + +@pytest.fixture +def list_input(): + res1 = resource.wrap_resource( + {'id': 'res1', 'input': {'ip': {'value': '10.10.0.2'}}}) + res1.save() + res2 = resource.wrap_resource( + {'id': 'res2', 'input': {'ip': {'value': '10.10.0.3'}}}) + res2.save() + consumer = resource.wrap_resource( + {'id': 'consumer', 'input': + {'ips': {'value': [], + 'schema': ['str']}}}) + consumer.save() + + signals.connect(res1, consumer, {'ip': 'ips'}) + signals.connect(res2, consumer, {'ip': 'ips'}) + return resource.load_all() + + +@patch('solar.core.actions.resource_action') +def test_update_list_resource(maction, list_input): + operations.stage_changes() + operations.commit_changes() + + res3 = resource.wrap_resource( + {'id': 'res3', 'input': {'ip': {'value': '10.10.0.4'}}}) + res3.save() + signals.connect(res3, list_input['consumer'], {'ip': 'ips'}) + + log = operations.stage_changes() + + assert len(log) == 2 + + assert log.items[0].res == res3.name + assert log.items[1].diff == [ + ('add', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]), + ('add', u'input.ips', [ + (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]), + ('add', u'metadata.input.ips.value', + [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])] + + operations.commit_changes() + assert list_input['consumer'].args_dict() == { + u'ips': [ + {u'emitter_attached_to': u'res1', u'emitter': u'ip', u'value': u'10.10.0.2'}, + 
{u'emitter_attached_to': u'res2', u'emitter': u'ip', u'value': u'10.10.0.3'}, + {'emitter_attached_to': 'res3', 'emitter': 'ip', 'value': '10.10.0.4'}]} + + log_item = operations.rollback_last() + assert log_item.diff == [ + ('remove', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]), + ('remove', u'input.ips', [ + (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]), + ('remove', u'metadata.input.ips.value', + [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])] + + consumer = resource.load('consumer') + assert consumer.args_dict() == { + u'ips': [{u'emitter': u'ip', + u'emitter_attached_to': u'res1', + u'value': u'10.10.0.2'}, + {u'emitter': u'ip', + u'emitter_attached_to': u'res2', + u'value': u'10.10.0.3'}]} + + diff --git a/solar/solar/test/test_validation.py b/solar/solar/test/test_validation.py index 3480b460..0e5339bb 100644 --- a/solar/solar/test/test_validation.py +++ b/solar/solar/test/test_validation.py @@ -1,7 +1,5 @@ import unittest -from pytest import mark - from solar.test import base from solar.core import validation as sv From 831ab2e08ba9883117404fd3f3067e1af565a8c5 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 10 Jun 2015 17:34:05 +0300 Subject: [PATCH 8/8] Create resource for tests --- .gitignore | 1 + example.py | 1 - run_tests.sh | 3 +-- solar/solar/core/resource.py | 12 +++++----- solar/solar/interfaces/db/redis_db.py | 8 +++---- .../solar/test/test_stage_commit_procedure.py | 15 ++++++++----- .../solar/test/test_update_propagated_data.py | 22 ++++++++++++++----- 7 files changed, 39 insertions(+), 23 deletions(-) diff --git a/.gitignore b/.gitignore index 596446eb..f635cdf9 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ tmp/ state/ clients.json rs/ +x-venv/ diff --git a/example.py b/example.py index f3869ee0..d8585359 100644 --- a/example.py +++ b/example.py @@ -170,7 +170,6 @@ def deploy(): signals.Connections.flush() - has_errors = False for r in locals().values(): if not isinstance(r, resource.Resource): diff --git a/run_tests.sh b/run_tests.sh index 531344d0..fa31e702 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -20,7 +20,6 @@ pip install -r solar/requirements.txt --download-cache=/tmp/$JOB_NAME pushd solar/solar -PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_signals.py -PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_validation.py +PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE py.test test/ popd diff --git a/solar/solar/core/resource.py b/solar/solar/core/resource.py index 8342a3a4..4f36186e 100644 --- a/solar/solar/core/resource.py +++ b/solar/solar/core/resource.py @@ -26,7 +26,11 @@ class Resource(object): self.metadata = metadata self.actions = metadata.get('actions', {}).keys() or None self.args = {} + self.set_args(args) + self.changed = [] + self.tags = tags or [] + def set_args(self, args): for arg_name, arg_value in args.items(): if not self.metadata['input'].get(arg_name): continue @@ -39,11 +43,9 @@ class Resource(object): value = metadata_arg['value'] self.args[arg_name] = observer.create(type_, self, arg_name, value) - self.changed = [] - self.tags = tags or [] def __repr__(self): - return ("Resource(name='{name}', metadata={metadata}, args={args}, " + return ("Resource(name='{id}', metadata={metadata}, args={input}, " "tags={tags})").format(**self.to_dict()) def color_repr(self): @@ -51,8 +53,8 @@ class Resource(object): arg_color = 'yellow' - return ("{resource_s}({name_s}='{name}', 
{metadata_s}={metadata}, " - "{args_s}={args}, {tags_s}={tags})").format( + return ("{resource_s}({name_s}='{id}', {metadata_s}={metadata}, " + "{args_s}={input}, {tags_s}={tags})").format( resource_s=click.style('Resource', fg='white', bold=True), name_s=click.style('name', fg=arg_color, bold=True), metadata_s=click.style('metadata', fg=arg_color, bold=True), diff --git a/solar/solar/interfaces/db/redis_db.py b/solar/solar/interfaces/db/redis_db.py index 0f0aaec0..abd47710 100644 --- a/solar/solar/interfaces/db/redis_db.py +++ b/solar/solar/interfaces/db/redis_db.py @@ -26,14 +26,14 @@ class RedisDB(object): def read(self, uid, collection=COLLECTIONS.resource): try: return json.loads( - self._r.get(self._make_key(collection.name, uid)) + self._r.get(self._make_key(collection, uid)) ) except TypeError: return None def save(self, uid, data, collection=COLLECTIONS.resource): return self._r.set( - self._make_key(collection.name, uid), + self._make_key(collection, uid), json.dumps(data) ) @@ -41,7 +41,7 @@ class RedisDB(object): return self._r.delete(self._make_key(collection, uid)) def get_list(self, collection=COLLECTIONS.resource): - key_glob = self._make_key(collection.name, '*') + key_glob = self._make_key(collection, '*') for key in self._r.keys(key_glob): yield json.loads(self._r.get(key)) @@ -50,7 +50,7 @@ class RedisDB(object): self._r.flushdb() def _make_key(self, collection, _id): - return '{0}:{1}'.format(collection, _id) + return '{0}:{1}'.format(collection.name, _id) class FakeRedisDB(RedisDB): diff --git a/solar/solar/test/test_stage_commit_procedure.py b/solar/solar/test/test_stage_commit_procedure.py index 86424a57..7da48872 100644 --- a/solar/solar/test/test_stage_commit_procedure.py +++ b/solar/solar/test/test_stage_commit_procedure.py @@ -11,12 +11,17 @@ from solar import state def default_resources(): from solar.core import signals from solar.core import resource - node1 = resource.create('node1', '/vagrant/resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'}) - rabbitmq_service1 = resource.create('rabbitmq', '/vagrant/resources/rabbitmq_service/', {'ssh_user': '', 'ip': '','management_port': '15672', 'port': '5672', 'ssh_key': '', 'container_name': 'rabbitmq_service1', 'image': 'rabbitmq:3-management'}) - openstack_vhost = resource.create('vhost', '/vagrant/resources/rabbitmq_vhost/', {'ssh_user': '', 'ip': '', 'ssh_key': '', 'vhost_name': 'openstack', 'container_name': ''}) + node1 = resource.wrap_resource( + {'id': 'node1', + 'input': {'ip': {'value':'10.0.0.3'}}}) + node1.save() + rabbitmq_service1 = resource.wrap_resource( + {'id':'rabbitmq', 'input': { + 'ip' : {'value': ''}, + 'image': {'value': 'rabbitmq:3-management'}}}) + rabbitmq_service1.save() signals.connect(node1, rabbitmq_service1) - signals.connect(rabbitmq_service1, openstack_vhost) return resource.load_all() @@ -25,7 +30,7 @@ def default_resources(): def test_changes_on_update_image(maction): log = operations.stage_changes() - assert len(log) == 3 + assert len(log) == 2 operations.commit_changes() diff --git a/solar/solar/test/test_update_propagated_data.py b/solar/solar/test/test_update_propagated_data.py index a8fa7153..0134d177 100644 --- a/solar/solar/test/test_update_propagated_data.py +++ b/solar/solar/test/test_update_propagated_data.py @@ -9,14 +9,24 @@ from solar import state @pytest.fixture def resources(): - node1 = resource.create('node1', '/vagrant/resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': 
'/vagrant/', 'ssh_user': 'vagrant'}) - - mariadb_service1 = resource.create('mariadb', '/vagrant/resources/mariadb_service', {'image': 'mariadb', 'root_password': 'mariadb', 'port': 3306, 'ip': '', 'ssh_user': '', 'ssh_key': ''}) - keystone_db = resource.create('keystone_db', '/vagrant/resources/mariadb_keystone_db/', {'db_name': 'keystone_db', 'login_password': '', 'login_user': 'root', 'login_port': '', 'ip': '', 'ssh_user': '', 'ssh_key': ''}) - + node1 = resource.wrap_resource( + {'id': 'node1', + 'input': {'ip': {'value':'10.0.0.3'}}}) + node1.save() + mariadb_service1 = resource.wrap_resource( + {'id':'mariadb', 'input': { + 'port' : {'value': 3306}, + 'ip': {'value': ''}}}) + mariadb_service1.save() + keystone_db = resource.wrap_resource( + {'id':'keystone_db', + 'input': { + 'login_port' : {'value': ''}, + 'ip': {'value': ''}}}) + keystone_db.save() signals.connect(node1, mariadb_service1) signals.connect(node1, keystone_db) - signals.connect(mariadb_service1, keystone_db, {'root_password': 'login_password', 'port': 'login_port'}) + signals.connect(mariadb_service1, keystone_db, {'port': 'login_port'}) return resource.load_all()
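
Note on the fixture change above: PATCH 8/8 replaces the resource.create(...) fixtures, which read resource definitions from /vagrant/resources/* on disk, with in-memory resource.wrap_resource(...) dicts, so py.test can run the suite without the Vagrant directory layout. Below is a minimal sketch of that fixture pattern; the resource ids and test name are hypothetical, and only calls that already appear in these patches (wrap_resource, save, connect, load_all, update, stage_changes, commit_changes) are used.

    import pytest
    from mock import patch

    from solar.core import signals
    from solar.core import resource
    from solar import operations


    @pytest.fixture
    def producer_consumer():
        # In-memory resources: only the inputs under test are declared,
        # nothing is loaded from /vagrant/resources/*.
        producer = resource.wrap_resource(
            {'id': 'producer', 'input': {'port': {'value': 3306}}})
        producer.save()
        consumer = resource.wrap_resource(
            {'id': 'consumer', 'input': {'login_port': {'value': ''}}})
        consumer.save()
        # Propagate producer.port into consumer.login_port.
        signals.connect(producer, consumer, {'port': 'login_port'})
        return resource.load_all()


    @patch('solar.core.actions.resource_action')
    def test_port_change_stages_both_resources(maction, producer_consumer):
        operations.stage_changes()
        operations.commit_changes()

        producer_consumer['producer'].update({'port': 4400})

        # The changed resource and its connected consumer are both staged,
        # mirroring test_update_port_on_mariadb above.
        log = operations.stage_changes()
        assert len(log) == 2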
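
The diff tuples asserted against log.items[n].diff in these tests match the output format of dictdiffer, which is already pinned in solar/requirements.txt and appears to be where the staged log gets its diffs from. A standalone sketch of that format, independent of solar:

    import dictdiffer

    old = {'input': {'port': {'value': 3306}}}
    new = {'input': {'port': {'value': 4400}}}

    # Changes to nested keys are reported as a dotted path plus an
    # (old, new) pair, the same shape as ('change', u'input.port.value', ...).
    assert list(dictdiffer.diff(old, new)) == [
        ('change', 'input.port.value', (3306, 4400))]

    # Appending to a list is reported as ('add', <path>, [(index, value)]),
    # matching the 'input.ips' assertions for the list-input consumer.
    assert list(dictdiffer.diff({'ips': ['a', 'b']}, {'ips': ['a', 'b', 'c']})) == [
        ('add', 'ips', [(2, 'c')])]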
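
The redis_db.py hunk moves the enum-to-string conversion into _make_key, so read, save, get_list and delete can all be handed a COLLECTIONS member directly and the key layout is defined in one place. A sketch of the resulting key scheme, assuming only the COLLECTIONS.resource member that is visible in these patches:

    from enum import Enum  # enum34 on Python 2, per solar/requirements.txt


    class COLLECTIONS(Enum):
        # Only 'resource' shows up in the diffs; any other members are assumed.
        resource = 1


    def _make_key(collection, _id):
        # The '.name' lookup lives here now, so callers pass the enum member.
        return '{0}:{1}'.format(collection.name, _id)


    assert _make_key(COLLECTIONS.resource, 'node1') == 'resource:node1'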