From de0d00b6e9ebaced3ad49d050af87cc3f7c6382b Mon Sep 17 00:00:00 2001
From: Dmitry Bogun
Date: Mon, 28 Nov 2016 17:06:41 +0200
Subject: [PATCH] "newton" bareon patch

---
 ironic/api/controllers/v1/node.py                  |  5 +++
 ironic/common/context.py                           |  1 -
 ironic/common/images.py                            | 13 ++++---
 ironic/common/states.py                            |  6 +++
 ironic/common/swift.py                             | 21 +++++++++++
 ironic/conductor/manager.py                        |  5 +++
 ironic/conductor/task_manager.py                   |  2 +-
 ironic/drivers/base.py                             |  7 ++++
 ironic/drivers/modules/image_cache.py              | 43 ++++++++++++++--------
 ironic/tests/unit/common/test_rpc.py               |  6 +--
 ironic/tests/unit/common/test_swift.py             |  1 +
 .../tests/unit/drivers/modules/test_image_cache.py | 17 +++++----
 12 files changed, 91 insertions(+), 36 deletions(-)

diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index 5cb2ccc..a28e6ea 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -35,6 +35,7 @@ from ironic.api.controllers.v1 import utils as api_utils
 from ironic.api.controllers.v1 import versions
 from ironic.api import expose
 from ironic.common import exception
+from ironic.common import driver_factory
 from ironic.common.i18n import _
 from ironic.common import policy
 from ironic.common import states as ir_states
@@ -538,6 +539,10 @@ class NodeStatesController(rest.RestController):
 
         m = ir_states.machine.copy()
         m.initialize(rpc_node.provision_state)
+
+        ir_states.apply_driver_patches(
+            m, driver_factory.get_driver(rpc_node.driver))
+
         if not m.is_actionable_event(ir_states.VERBS.get(target, target)):
             # Normally, we let the task manager recognize and deal with
             # NodeLocked exceptions. However, that isn't done until the RPC
diff --git a/ironic/common/context.py b/ironic/common/context.py
index 6e9c7e3..f708e4d 100644
--- a/ironic/common/context.py
+++ b/ironic/common/context.py
@@ -71,7 +71,6 @@ class RequestContext(context.RequestContext):
     @classmethod
     def from_dict(cls, values):
         values.pop('user', None)
-        values.pop('tenant', None)
         return cls(**values)
 
     def ensure_thread_contain_context(self):
diff --git a/ironic/common/images.py b/ironic/common/images.py
index 908671c..ddc87e1 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -292,16 +292,17 @@ def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
         raise exception.ImageCreationFailed(image_type='iso', error=e)
 
 
-def fetch(context, image_href, path, force_raw=False):
+def fetch(context, image_href, path, force_raw=False, image_service=None):
     # TODO(vish): Improve context handling and add owner and auth data
     #             when it is added to glance. Right now there is no
     #             auth checking in glance, so we assume that access was
     #             checked before we got here.
-    image_service = service.get_image_service(image_href,
-                                              context=context)
-    LOG.debug("Using %(image_service)s to download image %(image_href)s." %
-              {'image_service': image_service.__class__,
-               'image_href': image_href})
+    if not image_service:
+        image_service = service.get_image_service(image_href,
+                                                  context=context)
+    LOG.debug("Using %(image_service)s to download image %(image_href)s." %
+              {'image_service': image_service.__class__,
+               'image_href': image_href})
 
     with fileutils.remove_path_on_error(path):
         with open(path, "wb") as image_file:
diff --git a/ironic/common/states.py b/ironic/common/states.py
index f14ce5e..9de5c2b 100644
--- a/ironic/common/states.py
+++ b/ironic/common/states.py
@@ -382,3 +382,9 @@ machine.add_transition(ADOPTFAIL, ADOPTING, 'adopt')
 
 # A node that failed adoption can be moved back to manageable
 machine.add_transition(ADOPTFAIL, MANAGEABLE, 'manage')
+
+
+def apply_driver_patches(machine, driver):
+    # Whether the deployment can be terminated
+    if driver.deploy.can_terminate_deployment:
+        machine.add_transition(DEPLOYING, DELETING, 'delete')
diff --git a/ironic/common/swift.py b/ironic/common/swift.py
index c16cb3c..e8452e0 100644
--- a/ironic/common/swift.py
+++ b/ironic/common/swift.py
@@ -173,3 +173,24 @@ class SwiftAPI(object):
         except swift_exceptions.ClientException as e:
             operation = _("post object")
             raise exception.SwiftOperationError(operation=operation, error=e)
+
+    # FIXME(dbogun): make tenant bound connection
+    def get_object(self, container, object, object_headers=None,
+                   chunk_size=None):
+        """Get Swift object.
+
+        :param container: The name of the container in which the Swift
+            object is placed.
+        :param object: The name of the object in Swift
+        :param object_headers: the headers for the object to pass to Swift
+        :param chunk_size: size of the chunks used to read from the response
+        :returns: Tuple (body, headers)
+        :raises: SwiftOperationError, if operation with Swift fails.
+        """
+        try:
+            return self.connection.get_object(container, object,
+                                              headers=object_headers,
+                                              resp_chunk_size=chunk_size)
+        except swift_exceptions.ClientException as e:
+            operation = _("get object")
+            raise exception.SwiftOperationError(operation=operation, error=e)
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index a83c71b..d63cd06 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -515,6 +515,11 @@ class ConductorManager(base_manager.BaseConductorManager):
         """
         LOG.debug("RPC do_node_tear_down called for node %s." % node_id)
 
+        with task_manager.acquire(context, node_id, shared=True) as task:
+            if (task.node.provision_state == states.DEPLOYING and
+                    task.driver.deploy.can_terminate_deployment):
+                task.driver.deploy.terminate_deployment(task)
+
         with task_manager.acquire(context, node_id, shared=False,
                                   purpose='node tear down') as task:
             try:
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index a709e48..8993cf7 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -217,7 +217,7 @@ class TaskManager(object):
                                                       self.node.id)
             self.driver = driver_factory.build_driver_for_task(
                 self, driver_name=driver_name)
-
+            states.apply_driver_patches(self.fsm, self.driver)
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.release_resources()
diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py
index af4aab9..e7b966a 100644
--- a/ironic/drivers/base.py
+++ b/ironic/drivers/base.py
@@ -401,6 +401,13 @@ class DeployInterface(BaseInterface):
                         'the driver %(driver)s does not support heartbeating'),
                     {'node': task.node.uuid, 'driver': task.node.driver})
 
+    def terminate_deployment(self, *args, **kwargs):
+        pass
+
+    @property
+    def can_terminate_deployment(self):
+        return False
+
 
 @six.add_metaclass(abc.ABCMeta)
 class BootInterface(object):
diff --git a/ironic/drivers/modules/image_cache.py b/ironic/drivers/modules/image_cache.py
index 541cce4..42e0026 100644
--- a/ironic/drivers/modules/image_cache.py
+++ b/ironic/drivers/modules/image_cache.py
@@ -26,6 +26,7 @@ import uuid
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
 from oslo_utils import fileutils
+from oslo_utils import uuidutils
 import six
 
 from ironic.common import exception
@@ -48,7 +49,8 @@ _cache_cleanup_list = []
 class ImageCache(object):
     """Class handling access to cache for master images."""
 
-    def __init__(self, master_dir, cache_size, cache_ttl):
+    def __init__(self, master_dir, cache_size, cache_ttl,
+                 image_service=None):
         """Constructor.
 
         :param master_dir: cache directory to work on
@@ -59,6 +61,7 @@
         self.master_dir = master_dir
         self._cache_size = cache_size
         self._cache_ttl = cache_ttl
+        self._image_service = image_service
         if master_dir is not None:
             fileutils.ensure_tree(master_dir)
 
@@ -83,23 +86,28 @@
             # NOTE(ghe): We don't share images between instances/hosts
             if not CONF.parallel_image_downloads:
                 with lockutils.lock(img_download_lock_name, 'ironic-'):
-                    _fetch(ctx, href, dest_path, force_raw)
+                    _fetch(ctx, href, dest_path,
+                           image_service=self._image_service,
+                           force_raw=force_raw)
             else:
-                _fetch(ctx, href, dest_path, force_raw)
+                _fetch(ctx, href, dest_path, image_service=self._image_service,
+                       force_raw=force_raw)
             return
 
         # TODO(ghe): have hard links and counts the same behaviour in all fs
 
-        # NOTE(vdrok): File name is converted to UUID if it's not UUID already,
-        # so that two images with same file names do not collide
-        if service_utils.is_glance_image(href):
-            master_file_name = service_utils.parse_image_ref(href)[0]
+        if uuidutils.is_uuid_like(href):
+            master_file_name = href
+
+        elif (self._image_service and
+              hasattr(self._image_service, 'get_image_unique_id')):
+            master_file_name = self._image_service.get_image_unique_id(href)
+
         else:
-            # NOTE(vdrok): Doing conversion of href in case it's unicode
-            # string, UUID cannot be generated for unicode strings on python 2.
             href_encoded = href.encode('utf-8') if six.PY2 else href
             master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                               href_encoded))
+
         master_path = os.path.join(self.master_dir, master_file_name)
 
         if CONF.parallel_image_downloads:
@@ -110,8 +118,8 @@
             # NOTE(vdrok): After rebuild requested image can change, so we
             # should ensure that dest_path and master_path (if exists) are
             # pointing to the same file and their content is up to date
-            cache_up_to_date = _delete_master_path_if_stale(master_path, href,
-                                                            ctx)
+            cache_up_to_date = _delete_master_path_if_stale(
+                master_path, href, ctx, img_service=self._image_service)
             dest_up_to_date = _delete_dest_path_if_stale(master_path,
                                                          dest_path)
 
@@ -157,7 +165,8 @@
         tmp_path = os.path.join(tmp_dir, href.split('/')[-1])
 
         try:
-            _fetch(ctx, href, tmp_path, force_raw)
+            _fetch(ctx, href, tmp_path, force_raw,
+                   image_service=self._image_service)
             # NOTE(dtantsur): no need for global lock here - master_path
             # will have link count >1 at any moment, so won't be cleaned up
             os.link(tmp_path, master_path)
@@ -298,10 +307,11 @@
     return stat.f_frsize * stat.f_bavail
 
 
-def _fetch(context, image_href, path, force_raw=False):
+def _fetch(context, image_href, path, force_raw=False, image_service=None):
     """Fetch image and convert to raw format if needed."""
     path_tmp = "%s.part" % path
-    images.fetch(context, image_href, path_tmp, force_raw=False)
+    images.fetch(context, image_href, path_tmp, force_raw=False,
+                 image_service=image_service)
     # Notes(yjiang5): If glance can provide the virtual size information,
     # then we can firstly clean cache and then invoke images.fetch().
     if force_raw:
@@ -374,7 +384,7 @@
     return _add_property_to_class_func
 
 
-def _delete_master_path_if_stale(master_path, href, ctx):
+def _delete_master_path_if_stale(master_path, href, ctx, img_service=None):
     """Delete image from cache if it is not up to date with href contents.
 
     :param master_path: path to an image in master cache
@@ -387,7 +397,8 @@
         # Glance image contents cannot be updated without changing image's UUID
         return os.path.exists(master_path)
     if os.path.exists(master_path):
-        img_service = image_service.get_image_service(href, context=ctx)
+        if not img_service:
+            img_service = image_service.get_image_service(href, context=ctx)
         img_mtime = img_service.show(href).get('updated_at')
         if not img_mtime:
             # This means that href is not a glance image and doesn't have an
diff --git a/ironic/tests/unit/common/test_rpc.py b/ironic/tests/unit/common/test_rpc.py
index 197f553..71fe5f8 100644
--- a/ironic/tests/unit/common/test_rpc.py
+++ b/ironic/tests/unit/common/test_rpc.py
@@ -180,10 +180,8 @@ class TestRequestContextSerializer(base.TestCase):
 
     def test_deserialize_context(self):
         self.context.user = 'fake-user'
-        self.context.tenant = 'fake-tenant'
         serialize_values = self.context.to_dict()
         new_context = self.serializer.deserialize_context(serialize_values)
-        # Ironic RequestContext from_dict will pop 'user' and 'tenant' and
-        # initialize to None.
+        # Ironic RequestContext from_dict will pop 'user' and initialize
+        # to None.
         self.assertIsNone(new_context.user)
-        self.assertIsNone(new_context.tenant)
diff --git a/ironic/tests/unit/common/test_swift.py b/ironic/tests/unit/common/test_swift.py
index e5bc306..e04d8e2 100644
--- a/ironic/tests/unit/common/test_swift.py
+++ b/ironic/tests/unit/common/test_swift.py
@@ -108,6 +108,7 @@ class SwiftTestCase(base.TestCase):
         connection_obj_mock.url = 'http://host/v1/AUTH_tenant_id'
         head_ret_val = {'x-account-meta-temp-url-key': 'secretkey'}
         connection_obj_mock.head_account.return_value = head_ret_val
+        connection_obj_mock.os_options = {}
         gen_temp_url_mock.return_value = 'temp-url-path'
         temp_url_returned = swiftapi.get_temp_url('container', 'object', 10)
         connection_obj_mock.head_account.assert_called_once_with()
diff --git a/ironic/tests/unit/drivers/modules/test_image_cache.py b/ironic/tests/unit/drivers/modules/test_image_cache.py
index 1224f52..ed9d83c 100644
--- a/ironic/tests/unit/drivers/modules/test_image_cache.py
+++ b/ironic/tests/unit/drivers/modules/test_image_cache.py
@@ -59,7 +59,7 @@ class TestImageCacheFetch(base.TestCase):
         self.cache.fetch_image(self.uuid, self.dest_path)
         self.assertFalse(mock_download.called)
         mock_fetch.assert_called_once_with(
-            None, self.uuid, self.dest_path, True)
+            None, self.uuid, self.dest_path, True, image_service=None)
         self.assertFalse(mock_clean_up.called)
 
     @mock.patch.object(image_cache.ImageCache, 'clean_up', autospec=True)
@@ -75,7 +75,7 @@ class TestImageCacheFetch(base.TestCase):
                                  mock_clean_up):
         self.cache.fetch_image(self.uuid, self.dest_path)
         mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
-                                               None)
+                                               None, img_service=None)
         mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
         self.assertFalse(mock_link.called)
         self.assertFalse(mock_download.called)
@@ -94,7 +94,7 @@ class TestImageCacheFetch(base.TestCase):
                                  mock_clean_up):
         self.cache.fetch_image(self.uuid, self.dest_path)
         mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
-                                               None)
+                                               None, img_service=None)
         mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
         mock_link.assert_called_once_with(self.master_path, self.dest_path)
         self.assertFalse(mock_download.called)
@@ -113,7 +113,7 @@ class TestImageCacheFetch(base.TestCase):
                                  mock_clean_up):
         self.cache.fetch_image(self.uuid, self.dest_path)
         mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
-                                               None)
+                                               None, img_service=None)
         mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
         self.assertFalse(mock_link.called)
         mock_download.assert_called_once_with(
@@ -134,7 +134,7 @@ class TestImageCacheFetch(base.TestCase):
                                  mock_clean_up):
         self.cache.fetch_image(self.uuid, self.dest_path)
         mock_cache_upd.assert_called_once_with(self.master_path, self.uuid,
-                                               None)
+                                               None, img_service=None)
         mock_dest_upd.assert_called_once_with(self.master_path, self.dest_path)
         self.assertFalse(mock_link.called)
         mock_download.assert_called_once_with(
@@ -158,7 +158,7 @@ class TestImageCacheFetch(base.TestCase):
 
     @mock.patch.object(image_cache, '_fetch', autospec=True)
     def test__download_image(self, mock_fetch):
-        def _fake_fetch(ctx, uuid, tmp_path, *args):
+        def _fake_fetch(ctx, uuid, tmp_path, *args, **kwargs):
             self.assertEqual(self.uuid, uuid)
             self.assertNotEqual(self.dest_path, tmp_path)
             self.assertNotEqual(os.path.dirname(tmp_path), self.master_dir)
@@ -446,7 +446,7 @@ class TestImageCacheCleanUp(base.TestCase):
     @mock.patch.object(utils, 'rmtree_without_raise', autospec=True)
     @mock.patch.object(image_cache, '_fetch', autospec=True)
     def test_temp_images_not_cleaned(self, mock_fetch, mock_rmtree):
-        def _fake_fetch(ctx, uuid, tmp_path, *args):
+        def _fake_fetch(ctx, uuid, tmp_path, *args, **kwargs):
             with open(tmp_path, 'w') as fp:
                 fp.write("TEST" * 10)
 
@@ -691,7 +691,8 @@ class TestFetchCleanup(base.TestCase):
         mock_size.return_value = 100
         image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
         mock_fetch.assert_called_once_with('fake', 'fake-uuid',
-                                           '/foo/bar.part', force_raw=False)
+                                           '/foo/bar.part', image_service=None,
+                                           force_raw=False)
         mock_clean.assert_called_once_with('/foo', 100)
         mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
                                          '/foo/bar.part')
-- 
2.7.3
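
Usage note (not part of the patch, kept below the signature so it does not
affect git am): a minimal sketch of how an out-of-tree deploy driver might opt
in to the termination hook introduced above. The TerminatableDeploy class and
its method bodies are hypothetical; only can_terminate_deployment,
terminate_deployment and the DEPLOYING -> DELETING transition come from this
patch.

    from ironic.common import states
    from ironic.drivers import base


    class TerminatableDeploy(base.DeployInterface):
        """Hypothetical deploy interface opting in to deployment termination."""

        def get_properties(self):
            return {}

        def validate(self, task):
            pass

        def prepare(self, task):
            pass

        def deploy(self, task):
            # A real driver would kick off its provisioning workflow here.
            return states.DEPLOYWAIT

        def tear_down(self, task):
            return states.DELETED

        def clean_up(self, task):
            pass

        def take_over(self, task):
            pass

        @property
        def can_terminate_deployment(self):
            # Advertising support makes states.apply_driver_patches() add the
            # DEPLOYING -> DELETING transition for nodes using this driver.
            return True

        def terminate_deployment(self, task):
            # Called by do_node_tear_down() while the node is still DEPLOYING;
            # a real driver would stop its in-flight provisioning work here.
            pass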