retire project
Change-Id: I06f59810c4037e4cf04a078312be2ad41d1d039f
This commit is contained in:
parent
bd2c050782
commit
c0582e2342
12
.gitignore
vendored
12
.gitignore
vendored
@ -1,12 +0,0 @@
|
||||
.idea/*
|
||||
*.pyc
|
||||
dist/
|
||||
*.egg-info/
|
||||
*.egg/
|
||||
build/
|
||||
.coverage
|
||||
.testrepository/
|
||||
.tox/
|
||||
cover/
|
||||
htmlcov/
|
||||
.eggs/
|
@ -1,4 +0,0 @@
|
||||
[gerrit]
|
||||
host=review.openstack.org
|
||||
port=29418
|
||||
project=stackforge/python-rackclient.git
|
@ -1,5 +0,0 @@
|
||||
[DEFAULT]
|
||||
test_command=RACK_IS_TEST=1 \
|
||||
${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION
|
||||
test_id_option=--load-list $IDFILE
|
||||
test_list_option=--list
|
201
LICENSE
201
LICENSE
@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
84
README.md
84
README.md
@ -1,78 +1,10 @@
|
||||
Client library for RACK API
|
||||
=====================
|
||||
This project is no longer maintained.
|
||||
|
||||
## python-rackcilent
|
||||
The contents of this repository are still available in the Git
|
||||
source code management system. To see the contents of this
|
||||
repository before it reached its end of life, please check out the
|
||||
previous commit with "git checkout HEAD^1".
|
||||
|
||||
This is a client library for [**RACK**](https://github.com/stackforge/rack) API.
|
||||
It provides a python API and a command-line tool.
|
||||
|
||||
### Install
|
||||
|
||||
```
|
||||
git clone https://github.com/stackforge/python-rackclient.git
|
||||
cd python-rackclient
|
||||
pip install -r requirements.txt
|
||||
python setup.py install
|
||||
```
|
||||
|
||||
## command-line
|
||||
|
||||
Once you have rackclient installed, you can use a shell command, `rack`, that interacts with RACK API.
|
||||
|
||||
You'll need to provide the endpoint of RACK API.
|
||||
You can set it with `--rack-url` option or as a environment variable:
|
||||
|
||||
```
|
||||
export RACK_URL=http://example.com:8088/v1
|
||||
```
|
||||
|
||||
Now you are all set.
|
||||
|
||||
You'll find complete documentation on the shell by running `rack help` command.
|
||||
|
||||
## python API
|
||||
|
||||
There's also a python API.
|
||||
This is a very simple example:
|
||||
|
||||
```
|
||||
from rackclient.v1 import client
|
||||
|
||||
# main client object
|
||||
rack = client.Client(rack_url='http://192.168.100.218:8088/v1')
|
||||
|
||||
# create a process group
|
||||
group = rack.groups.create(name='test-group')
|
||||
|
||||
# create a keypair
|
||||
rack.keypairs.create(gid=group.gid, is_default=True)
|
||||
|
||||
# create a securitygroup
|
||||
rules = [
|
||||
{
|
||||
"protocol": "tcp",
|
||||
"port_range_max": "65535",
|
||||
"port_range_min": "1",
|
||||
"remote_ip_prefix": "0.0.0.0/0"
|
||||
}
|
||||
]
|
||||
rack.securitygroups.create(gid=group.gid, is_default=True, securitygroup_rules=rules)
|
||||
|
||||
# create a network
|
||||
rack.networks.create(gid=group.gid, cidr="10.10.10.0/24", dns_nameservers=["8.8.8.8"], ext_router_id="eda01125-8c40-41dd-a694-db7578ed7725")
|
||||
|
||||
# boot a rack-proxy
|
||||
rack.proxy.create(gid=group.gid, nova_flavor_id="2", glance_image_id="42ec5e05-ade9-426d-a70a-fd5e02fcf261")
|
||||
|
||||
# boot a process
|
||||
process = rack.processes.create(gid=group.gid, nova_flavor_id="2", glance_image_id="90d8d31c-6386-4740-8ae5-e8a80b8fc6dd")
|
||||
|
||||
# You can access the process's context as object's attribute
|
||||
print process.gid, process.pid, process.name, ...
|
||||
```
|
||||
|
||||
|
||||
## RACK Project Resources
|
||||
|
||||
* [Wiki](https://wiki.openstack.org/wiki/RACK)
|
||||
* [Source Code](https://github.com/stackforge/rack)
|
||||
For any further questions, please email
|
||||
openstack-dev@lists.openstack.org or join #openstack-dev on
|
||||
Freenode.
|
||||
|
@ -1,61 +0,0 @@
|
||||
[group]
|
||||
### Required params ###
|
||||
name =
|
||||
|
||||
### Not required params ###
|
||||
#description =
|
||||
|
||||
[keypair]
|
||||
### Not required params ###
|
||||
#name =
|
||||
|
||||
# True or False
|
||||
#is_default =
|
||||
|
||||
[network]
|
||||
### Required params ###
|
||||
cidr =
|
||||
|
||||
# UUID value
|
||||
ext_router_id =
|
||||
|
||||
### Not required params ###
|
||||
# True or False
|
||||
#is_admin =
|
||||
|
||||
# IP address
|
||||
#gateway_ip =
|
||||
|
||||
# Space separated value
|
||||
# Syntax: dns_nameservers = x.x.x.x y.y.y.y
|
||||
#dns_nameservers =
|
||||
|
||||
[securitygroup]
|
||||
### Required params ###
|
||||
# Syntax:
|
||||
# rules = protocol=[tcp|udp|icmp],port_range_max=[1-65535],port_range_min=[1-65535],[remote_ip_prefix=x.x.x.x/xx|remote_securitygroup_id=xx]
|
||||
#
|
||||
# Examples (you can specify more than one rules as a space separated value):
|
||||
# rules =
|
||||
# protocol=tcp,port_range_max=80,port_range_min=80,remote_ip_prefix=10.0.0.0/24
|
||||
# protocol=udp,port_range_max=1,port_range_min=1023,remote_securitygroup_id=0dae9752-7e60-4da1-8c98-616e8b97ce8d
|
||||
# protocol=icmp,remote_ip_prefix=10.0.0.0/24
|
||||
rules =
|
||||
|
||||
### Not required params ###
|
||||
#name =
|
||||
|
||||
# True or False
|
||||
#is_default =
|
||||
|
||||
[proxy]
|
||||
### Required params ###
|
||||
# UUID value
|
||||
nova_flavor_id =
|
||||
|
||||
# UUID value
|
||||
glance_image_id =
|
||||
|
||||
### Not required params ###
|
||||
#name =
|
||||
|
@ -1,15 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
# The list of modules to copy from oslo-incubator
|
||||
module=apiclient
|
||||
module=cliutils
|
||||
module=gettextutils
|
||||
module=importutils
|
||||
module=jsonutils
|
||||
module=network_utils
|
||||
module=strutils
|
||||
module=timeutils
|
||||
module=uuidutils
|
||||
|
||||
# The base module to hold the copy of openstack.common
|
||||
base=rackclient
|
@ -1,135 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
from rackclient import exceptions
|
||||
from rackclient.openstack.common import importutils
|
||||
|
||||
|
||||
class HTTPClient(object):
|
||||
|
||||
def __init__(self, rack_url=None, http_log_debug=False):
|
||||
self.rack_url = rack_url.rstrip('/')
|
||||
self.http_log_debug = http_log_debug
|
||||
self._logger = logging.getLogger(__name__)
|
||||
|
||||
if self.http_log_debug and not self._logger.handlers:
|
||||
ch = logging.StreamHandler()
|
||||
self._logger.addHandler(ch)
|
||||
self._logger.propagate = False
|
||||
|
||||
def http_log_req(self, method, url, kwargs):
|
||||
if not self.http_log_debug:
|
||||
return
|
||||
|
||||
string_parts = ['curl -i']
|
||||
string_parts.append(" '%s'" % url)
|
||||
string_parts.append(' -X %s' % method)
|
||||
|
||||
headers = copy.deepcopy(kwargs['headers'])
|
||||
keys = sorted(headers.keys())
|
||||
for name in keys:
|
||||
value = headers[name]
|
||||
header = ' -H "%s: %s"' % (name, value)
|
||||
string_parts.append(header)
|
||||
|
||||
if 'data' in kwargs:
|
||||
data = json.loads(kwargs['data'])
|
||||
string_parts.append(" -d '%s'" % json.dumps(data))
|
||||
self._logger.debug("REQ: %s" % "".join(string_parts))
|
||||
|
||||
def http_log_resp(self, resp):
|
||||
if not self.http_log_debug:
|
||||
return
|
||||
|
||||
if resp.text and resp.status_code != 400:
|
||||
try:
|
||||
body = json.loads(resp.text)
|
||||
except ValueError:
|
||||
body = None
|
||||
else:
|
||||
body = None
|
||||
|
||||
self._logger.debug("RESP: [%(status)s] %(headers)s\nRESP BODY: "
|
||||
"%(text)s\n", {'status': resp.status_code,
|
||||
'headers': resp.headers,
|
||||
'text': json.dumps(body)})
|
||||
|
||||
def request(self, url, method, **kwargs):
|
||||
kwargs.setdefault('headers', {})
|
||||
kwargs['headers']['User-Agent'] = 'python-rackclient'
|
||||
kwargs['headers']['Accept'] = 'application/json'
|
||||
if 'body' in kwargs:
|
||||
kwargs['headers']['Content-Type'] = 'application/json'
|
||||
kwargs['data'] = json.dumps(kwargs['body'])
|
||||
del kwargs['body']
|
||||
|
||||
url = self.rack_url + url
|
||||
|
||||
self.http_log_req(method, url, kwargs)
|
||||
|
||||
request_func = requests.request
|
||||
resp = request_func(
|
||||
method,
|
||||
url,
|
||||
**kwargs)
|
||||
|
||||
self.http_log_resp(resp)
|
||||
|
||||
if resp.text:
|
||||
try:
|
||||
body = json.loads(resp.text)
|
||||
except ValueError:
|
||||
body = None
|
||||
else:
|
||||
body = None
|
||||
|
||||
if resp.status_code >= 400:
|
||||
raise exceptions.from_response(resp, body, url, method)
|
||||
|
||||
return resp, body
|
||||
|
||||
def get(self, url, **kwargs):
|
||||
return self.request(url, 'GET', **kwargs)
|
||||
|
||||
def post(self, url, **kwargs):
|
||||
return self.request(url, 'POST', **kwargs)
|
||||
|
||||
def put(self, url, **kwargs):
|
||||
return self.request(url, 'PUT', **kwargs)
|
||||
|
||||
def delete(self, url, **kwargs):
|
||||
return self.request(url, 'DELETE', **kwargs)
|
||||
|
||||
|
||||
def get_client_class(version):
|
||||
version_map = {
|
||||
'1': 'rackclient.v1.client.Client',
|
||||
}
|
||||
try:
|
||||
client_path = version_map[str(version)]
|
||||
except (KeyError, ValueError):
|
||||
msg = ("Invalid client version '%(version)s'. must be one of: "
|
||||
"%(keys)s") % {'version': version,
|
||||
'keys': ', '.join(version_map.keys())}
|
||||
raise exceptions.UnsupportedVersion(msg)
|
||||
|
||||
return importutils.import_class(client_path)
|
||||
|
||||
|
||||
def Client(version, *args, **kwargs):
|
||||
client_class = get_client_class(version)
|
||||
return client_class(*args, **kwargs)
|
@ -1,174 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class UnsupportedVersion(Exception):
|
||||
"""Indicates that the user is trying to use an unsupported
|
||||
version of the API.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class CommandError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class HTTPException(Exception):
|
||||
message = 'Unknown Error'
|
||||
|
||||
def __init__(self, code, message=None, details=None, request_id=None,
|
||||
url=None, method=None):
|
||||
self.code = code
|
||||
self.message = message or self.__class__.message
|
||||
self.details = details
|
||||
self.request_id = request_id
|
||||
self.url = url
|
||||
self.method = method
|
||||
|
||||
def __str__(self):
|
||||
formatted_string = "%s (HTTP %s)" % (self.message, self.code)
|
||||
if self.request_id:
|
||||
formatted_string += " (Request-ID: %s)" % self.request_id
|
||||
|
||||
return formatted_string
|
||||
|
||||
|
||||
class BadRequest(HTTPException):
|
||||
"""
|
||||
HTTP 400 - Bad request: you sent some malformed data.
|
||||
"""
|
||||
http_status = 400
|
||||
message = "Bad request"
|
||||
|
||||
|
||||
class NotFound(HTTPException):
|
||||
"""
|
||||
HTTP 404 - Not found
|
||||
"""
|
||||
http_status = 404
|
||||
message = "Not found"
|
||||
|
||||
|
||||
class InternalServerError(HTTPException):
|
||||
"""
|
||||
HTTP 500 - Internal Server Error
|
||||
"""
|
||||
http_status = 500
|
||||
message = "Internal Server Error"
|
||||
|
||||
|
||||
class RateLimit(HTTPException):
|
||||
"""
|
||||
HTTP 413 - Too much Requests
|
||||
"""
|
||||
http_status = 413
|
||||
message = "This request was rate-limited."
|
||||
|
||||
|
||||
_error_classes = [BadRequest, NotFound, InternalServerError, RateLimit]
|
||||
_code_map = dict((c.http_status, c) for c in _error_classes)
|
||||
|
||||
|
||||
def from_response(response, body, url, method=None):
|
||||
kwargs = {
|
||||
'code': response.status_code,
|
||||
'method': method,
|
||||
'url': url,
|
||||
'request_id': None,
|
||||
}
|
||||
|
||||
if response.headers:
|
||||
kwargs['request_id'] = response.headers.get('x-compute-request-id')
|
||||
|
||||
if body:
|
||||
message = "n/a"
|
||||
details = "n/a"
|
||||
|
||||
if hasattr(body, 'keys'):
|
||||
error = body[list(body)[0]]
|
||||
message = error.get('message')
|
||||
details = error.get('details')
|
||||
|
||||
kwargs['message'] = message
|
||||
kwargs['details'] = details
|
||||
|
||||
cls = _code_map.get(response.status_code, HTTPException)
|
||||
return cls(**kwargs)
|
||||
|
||||
|
||||
class BaseError(Exception):
|
||||
"""
|
||||
The base exception class for all exceptions except for HTTPException
|
||||
based classes.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class ForkError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class AMQPConnectionError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidDirectoryError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidFilePathError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidFSEndpointError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class FileSystemAccessError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class MetadataAccessError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidProcessError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class ProcessInitError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class EndOfFile(Exception):
|
||||
message = 'EOF'
|
||||
|
||||
|
||||
class NoDescriptor(Exception):
|
||||
message = 'Descriptor Not Found'
|
||||
|
||||
def __init__(self, message=None):
|
||||
self.message = message or self.__class__.message
|
||||
|
||||
def __str__(self):
|
||||
formatted_string = self.message
|
||||
return formatted_string
|
||||
|
||||
|
||||
class NoReadDescriptor(NoDescriptor):
|
||||
message = 'Read Descriptor Not Found'
|
||||
|
||||
|
||||
class NoWriteDescriptor(NoDescriptor):
|
||||
message = 'Write Descriptor Not Found'
|
@ -1,21 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import initializing
|
||||
import os
|
||||
|
||||
RACK_CTX = None
|
||||
|
||||
if not os.environ.get("RACK_IS_TEST"):
|
||||
if not RACK_CTX:
|
||||
RACK_CTX = initializing.get_rack_context()
|
@ -1,126 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import cPickle
|
||||
import json
|
||||
import logging
|
||||
import pika
|
||||
import requests
|
||||
|
||||
from rackclient import exceptions
|
||||
from rackclient.client import Client
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
LOG.setLevel(logging.DEBUG)
|
||||
|
||||
META_URL = 'http://169.254.169.254/openstack/latest/meta_data.json'
|
||||
|
||||
|
||||
def get_rack_context(proxy_port=8088, client_version="1", api_version="v1"):
|
||||
|
||||
try:
|
||||
resp = requests.get(META_URL)
|
||||
metadata = json.loads(resp.text)["meta"]
|
||||
proxy_url = 'http://%s:%d/%s' % (
|
||||
metadata["proxy_ip"], proxy_port, api_version)
|
||||
client = Client(client_version, rack_url=proxy_url)
|
||||
proxy_info = client.proxy.get(metadata["gid"])
|
||||
|
||||
rack_ctx = type('', (object,), metadata)
|
||||
rack_ctx.ppid = getattr(rack_ctx, "ppid", "")
|
||||
rack_ctx.client = client
|
||||
rack_ctx.fs_endpoint = proxy_info.fs_endpoint
|
||||
rack_ctx.ipc_endpoint = proxy_info.ipc_endpoint
|
||||
rack_ctx.shm_endpoint = proxy_info.shm_endpoint
|
||||
|
||||
try:
|
||||
# Check if this process is not recognized by RACK.
|
||||
rack_ctx.client.processes.get(rack_ctx.gid, rack_ctx.pid)
|
||||
except exceptions.NotFound:
|
||||
msg = "This process is not recognized by RACK"
|
||||
raise exceptions.InvalidProcessError(msg)
|
||||
|
||||
if rack_ctx.ppid:
|
||||
LOG.debug("Messaging: send message to %s", rack_ctx.ppid)
|
||||
msg = _Messaging(rack_ctx)
|
||||
msg.send_msg(rack_ctx.ppid)
|
||||
while True:
|
||||
receive_msg = msg.receive_msg(
|
||||
getattr(rack_ctx, "msg_limit_time", 180))
|
||||
if receive_msg and rack_ctx.ppid == receive_msg.get("pid"):
|
||||
LOG.debug(
|
||||
"Messaging: receive message from %s",
|
||||
receive_msg.get("pid"))
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
msg = "Failed to initialize the process: %s." % e.message
|
||||
raise exceptions.ProcessInitError(msg)
|
||||
|
||||
return rack_ctx
|
||||
|
||||
|
||||
class _Messaging(object):
|
||||
|
||||
def __init__(self, ctx):
|
||||
self.ctx = ctx
|
||||
connection_param = pika.ConnectionParameters(self.ctx.proxy_ip)
|
||||
if self.ctx.ipc_endpoint:
|
||||
connection_param = pika.ConnectionParameters(self.ctx.ipc_endpoint)
|
||||
self.connection = pika.BlockingConnection(connection_param)
|
||||
self.channel = self.connection.channel()
|
||||
self.channel.exchange_declare(exchange=ctx.gid, type='topic')
|
||||
self.channel.queue_declare(queue=ctx.pid)
|
||||
self.channel.queue_bind(
|
||||
exchange=ctx.gid,
|
||||
queue=ctx.pid,
|
||||
routing_key=ctx.gid + '.' + ctx.pid)
|
||||
|
||||
def receive_msg(self, timeout_limit=180):
|
||||
queue_name = self.ctx.pid
|
||||
self.channel = self.connection.channel()
|
||||
receive = self.Receive()
|
||||
self.connection.add_timeout(
|
||||
deadline=int(timeout_limit),
|
||||
callback_method=receive.time_out)
|
||||
self.channel.basic_consume(
|
||||
receive.get_msg,
|
||||
queue=queue_name,
|
||||
no_ack=False)
|
||||
receive.channel = self.channel
|
||||
self.channel.start_consuming()
|
||||
return receive.message
|
||||
|
||||
def send_msg(self, target):
|
||||
routing_key = self.ctx.gid + '.' + target
|
||||
send_dict = {'pid': self.ctx.pid}
|
||||
send_msg = cPickle.dumps(send_dict)
|
||||
self.channel.basic_publish(
|
||||
exchange=self.ctx.gid,
|
||||
routing_key=routing_key,
|
||||
body=send_msg)
|
||||
|
||||
class Receive(object):
|
||||
|
||||
def __init__(self):
|
||||
self.channel = None
|
||||
self.message = None
|
||||
|
||||
def get_msg(self, ch, method, properties, body):
|
||||
self.message = cPickle.loads(body)
|
||||
ch.basic_ack(delivery_tag=method.delivery_tag)
|
||||
ch.stop_consuming()
|
||||
|
||||
def time_out(self):
|
||||
self.channel.stop_consuming()
|
@ -1,155 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.lib import RACK_CTX
|
||||
from rackclient import exceptions
|
||||
from swiftclient import client as swift_client
|
||||
from swiftclient import exceptions as swift_exc
|
||||
|
||||
import json
|
||||
import logging
|
||||
import tempfile
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
SWIFT_PORT = 8080
|
||||
|
||||
|
||||
def _get_swift_client():
|
||||
if RACK_CTX.fs_endpoint:
|
||||
try:
|
||||
d = json.loads(RACK_CTX.fs_endpoint)
|
||||
credentials = {
|
||||
"user": d["os_username"],
|
||||
"key": d["os_password"],
|
||||
"tenant_name": d["os_tenant_name"],
|
||||
"authurl": d["os_auth_url"],
|
||||
"auth_version": "2"
|
||||
}
|
||||
return swift_client.Connection(**credentials)
|
||||
except (ValueError, KeyError):
|
||||
msg = "The format of fs_endpoint is invalid."
|
||||
raise exceptions.InvalidFSEndpointError(msg)
|
||||
else:
|
||||
authurl = "http://%s:%d/auth/v1.0" % (RACK_CTX.proxy_ip, SWIFT_PORT)
|
||||
|
||||
credentials = {
|
||||
"user": "rack:admin",
|
||||
"key": "admin",
|
||||
"authurl": authurl
|
||||
}
|
||||
swift = swift_client.Connection(**credentials)
|
||||
authurl, token = swift.get_auth()
|
||||
|
||||
return swift_client.Connection(preauthurl=authurl, preauthtoken=token)
|
||||
|
||||
|
||||
def listdir(directory):
|
||||
swift = _get_swift_client()
|
||||
directory = directory.strip('/')
|
||||
|
||||
files = []
|
||||
try:
|
||||
objects = swift.get_container(directory)[1]
|
||||
for o in objects:
|
||||
file_path = '/' + directory + '/' + o['name']
|
||||
files.append(File(file_path))
|
||||
except swift_exc.ClientException as e:
|
||||
if e.http_status == 404:
|
||||
msg = "Directory '%s' does not exist." % directory
|
||||
raise exceptions.InvalidDirectoryError(msg)
|
||||
else:
|
||||
raise exceptions.FileSystemAccessError()
|
||||
|
||||
return files
|
||||
|
||||
|
||||
class File(object):
|
||||
|
||||
def __init__(self, file_path, mode="r"):
|
||||
self.path = file_path
|
||||
self.file = None
|
||||
if mode not in ('r', 'w'):
|
||||
raise ValueError(
|
||||
"mode must be 'r' or 'w', not %s" % mode)
|
||||
else:
|
||||
self.mode = mode
|
||||
|
||||
def get_name(self):
|
||||
return self.path.strip('/').split('/', 1)[1]
|
||||
|
||||
def get_directory(self):
|
||||
return self.path.strip('/').split('/', 1)[0]
|
||||
|
||||
def load(self, chunk_size=None):
|
||||
if self.file:
|
||||
return
|
||||
|
||||
if self.mode == 'r':
|
||||
self.file = tempfile.TemporaryFile()
|
||||
swift = _get_swift_client()
|
||||
|
||||
try:
|
||||
_, contents = swift.get_object(self.get_directory(),
|
||||
self.get_name(), chunk_size)
|
||||
if chunk_size:
|
||||
for c in contents:
|
||||
self.file.write(c)
|
||||
else:
|
||||
self.file.write(contents)
|
||||
self.file.flush()
|
||||
self.file.seek(0)
|
||||
except swift_exc.ClientException as e:
|
||||
if e.http_status == 404:
|
||||
msg = "File '%s' does not exist." % self.path
|
||||
raise exceptions.InvalidFilePathError(msg)
|
||||
else:
|
||||
raise exceptions.FileSystemAccessError()
|
||||
|
||||
def write(self, *args, **kwargs):
|
||||
if not self.file:
|
||||
self.file = tempfile.TemporaryFile()
|
||||
|
||||
self.file.write(*args, **kwargs)
|
||||
|
||||
def close(self):
|
||||
if self.mode == 'w':
|
||||
swift = _get_swift_client()
|
||||
|
||||
try:
|
||||
swift.put_container(self.get_directory())
|
||||
self.file.seek(0)
|
||||
swift.put_object(self.get_directory(), self.get_name(),
|
||||
self.file)
|
||||
except swift_exc.ClientException as e:
|
||||
if e.http_status == 404:
|
||||
msg = ("Directory '%s' does not exist. "
|
||||
"The file object will be closed."
|
||||
% self.get_directory())
|
||||
raise exceptions.InvalidDirectoryError(msg)
|
||||
else:
|
||||
msg = ("Could not save the file to the file system. "
|
||||
"The file object will be closed.")
|
||||
raise exceptions.FileSystemAccessError(msg)
|
||||
finally:
|
||||
self.file.close()
|
||||
|
||||
self.file.close()
|
||||
|
||||
def __getattr__(self, name):
|
||||
if self.file:
|
||||
return getattr(self.file, name)
|
||||
else:
|
||||
raise AttributeError("%s instance has no attribute '%s'",
|
||||
self.__class__.__name__, name)
|
@ -1,115 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.lib import RACK_CTX
|
||||
from rackclient import exceptions
|
||||
|
||||
import cPickle
|
||||
import logging
|
||||
import pika
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Messaging(object):
|
||||
|
||||
def __init__(self):
|
||||
self.connection = self._create_connection()
|
||||
self.channel = self.connection.channel()
|
||||
self.declare_queue(RACK_CTX.pid)
|
||||
|
||||
def declare_queue(self, queue_name):
|
||||
queue_name = str(queue_name)
|
||||
self.channel.exchange_declare(exchange=RACK_CTX.gid, type='topic')
|
||||
self.channel.queue_declare(queue=queue_name)
|
||||
self.channel.queue_bind(exchange=RACK_CTX.gid,
|
||||
queue=queue_name,
|
||||
routing_key=RACK_CTX.gid + '.' + queue_name)
|
||||
|
||||
def receive_all_msg(self, queue_name=None,
|
||||
timeout_limit=180, msg_limit_count=None):
|
||||
if not queue_name:
|
||||
queue_name = RACK_CTX.pid
|
||||
|
||||
self.channel = self.connection.channel()
|
||||
receive = self.Receive()
|
||||
timeout_limit = int(timeout_limit)
|
||||
self.connection.add_timeout(deadline=timeout_limit,
|
||||
callback_method=receive.time_out)
|
||||
self.channel.basic_consume(receive.get_all_msg,
|
||||
queue=queue_name,
|
||||
no_ack=False)
|
||||
receive.channel = self.channel
|
||||
receive.msg_count_limit = msg_limit_count
|
||||
self.channel.start_consuming()
|
||||
return receive.message_list
|
||||
|
||||
def receive_msg(self, queue_name=None, timeout_limit=180):
|
||||
if not queue_name:
|
||||
queue_name = RACK_CTX.pid
|
||||
self.channel = self.connection.channel()
|
||||
receive = self.Receive()
|
||||
timeout_limit = int(timeout_limit)
|
||||
self.connection.add_timeout(deadline=timeout_limit,
|
||||
callback_method=receive.time_out)
|
||||
self.channel.basic_consume(receive.get_msg,
|
||||
queue=queue_name,
|
||||
no_ack=False)
|
||||
receive.channel = self.channel
|
||||
self.channel.start_consuming()
|
||||
return receive.message
|
||||
|
||||
class Receive(object):
|
||||
def __init__(self):
|
||||
self.channel = None
|
||||
self.message = None
|
||||
self.message_list = []
|
||||
self.msg_count_limit = None
|
||||
|
||||
def get_all_msg(self, ch, method, properties, body):
|
||||
ch.basic_ack(delivery_tag=method.delivery_tag)
|
||||
self.message_list.append(cPickle.loads(body))
|
||||
msg_count = len(self.message_list)
|
||||
LOG.debug("Received message count. %s", msg_count)
|
||||
if self.msg_count_limit and self.msg_count_limit <= msg_count:
|
||||
ch.stop_consuming()
|
||||
|
||||
def get_msg(self, ch, method, properties, body):
|
||||
self.message = cPickle.loads(body)
|
||||
ch.basic_ack(delivery_tag=method.delivery_tag)
|
||||
ch.stop_consuming()
|
||||
|
||||
def time_out(self):
|
||||
self.channel.stop_consuming()
|
||||
|
||||
def send_msg(self, target, message=None):
|
||||
routing_key = RACK_CTX.gid + '.' + target
|
||||
send_dict = {'pid': RACK_CTX.pid}
|
||||
if message:
|
||||
send_dict['message'] = message
|
||||
send_msg = cPickle.dumps(send_dict)
|
||||
self.channel.basic_publish(exchange=RACK_CTX.gid,
|
||||
routing_key=routing_key,
|
||||
body=send_msg)
|
||||
|
||||
def _create_connection(self):
|
||||
if RACK_CTX.ipc_endpoint:
|
||||
connection_param = pika.ConnectionParameters(RACK_CTX.ipc_endpoint)
|
||||
else:
|
||||
connection_param = pika.ConnectionParameters(RACK_CTX.proxy_ip)
|
||||
try:
|
||||
connection = pika.BlockingConnection(connection_param)
|
||||
except pika.exceptions.AMQPConnectionError as e:
|
||||
raise exceptions.AMQPConnectionError(e)
|
||||
return connection
|
@ -1,190 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from datetime import datetime
|
||||
from rackclient.lib import RACK_CTX
|
||||
from rackclient import exceptions
|
||||
|
||||
import logging
|
||||
import redis
|
||||
import time
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
PIPE = 1
|
||||
FIFO = 2
|
||||
PORT = 6379
|
||||
|
||||
|
||||
def read_state_key(name):
|
||||
return name + "_read"
|
||||
|
||||
|
||||
def write_state_key(name):
|
||||
return name + "_write"
|
||||
|
||||
|
||||
def reference_key_pattern(name="*", pid="*"):
|
||||
return name + ":" + pid
|
||||
|
||||
|
||||
class Pipe:
|
||||
|
||||
def __init__(self, name=None, read=None, write=None):
|
||||
now = datetime.now()
|
||||
self.host = RACK_CTX.proxy_ip
|
||||
self.port = PORT
|
||||
|
||||
if name:
|
||||
self.is_named = True
|
||||
self.r = redis.StrictRedis(host=self.host, port=self.port, db=FIFO)
|
||||
self.name = name
|
||||
read_state = now
|
||||
write_state = now
|
||||
else:
|
||||
self.is_named = False
|
||||
self.r = redis.StrictRedis(host=self.host, port=self.port, db=PIPE)
|
||||
parent_pipe = self.r.keys(reference_key_pattern(pid=RACK_CTX.pid))
|
||||
if parent_pipe:
|
||||
self.name = self.r.get(parent_pipe[0])
|
||||
else:
|
||||
self.name = RACK_CTX.pid
|
||||
read_state = \
|
||||
self.r.hget(read_state_key(self.name), RACK_CTX.pid) or now
|
||||
write_state = \
|
||||
self.r.hget(write_state_key(self.name), RACK_CTX.pid) or now
|
||||
if read is not None:
|
||||
if read:
|
||||
read_state = now
|
||||
else:
|
||||
read_state = "close"
|
||||
if write is not None:
|
||||
if write:
|
||||
write_state = now
|
||||
else:
|
||||
write_state = "close"
|
||||
self.read_state = read_state
|
||||
self.write_state = write_state
|
||||
self.r.hset(read_state_key(self.name), RACK_CTX.pid, self.read_state)
|
||||
self.r.hset(write_state_key(self.name), RACK_CTX.pid, self.write_state)
|
||||
|
||||
def read(self):
|
||||
if self.read_state == "close":
|
||||
raise exceptions.NoReadDescriptor()
|
||||
data = self._read()
|
||||
while data is None:
|
||||
data = self._read()
|
||||
time.sleep(0.1)
|
||||
return data
|
||||
|
||||
def _read(self):
|
||||
data = self.r.lpop(self.name)
|
||||
if data is None and not self.has_writer():
|
||||
self.flush()
|
||||
raise exceptions.EndOfFile()
|
||||
else:
|
||||
return data
|
||||
|
||||
def write(self, data):
|
||||
if self.write_state == "close":
|
||||
raise exceptions.NoWriteDescriptor()
|
||||
self.r.rpush(self.name, data)
|
||||
if self.has_reader():
|
||||
return True
|
||||
else:
|
||||
raise exceptions.NoReadDescriptor()
|
||||
|
||||
def close_reader(self):
|
||||
self.read_state = "close"
|
||||
self.r.hset(read_state_key(self.name), RACK_CTX.pid, self.read_state)
|
||||
|
||||
def close_writer(self):
|
||||
self.write_state = "close"
|
||||
self.r.hset(write_state_key(self.name), RACK_CTX.pid, self.write_state)
|
||||
|
||||
def has_reader(self):
|
||||
read_states = self.r.hvals(read_state_key(self.name))
|
||||
if len(read_states) <= 1:
|
||||
return True
|
||||
for state in read_states:
|
||||
if not state == "close":
|
||||
return True
|
||||
return False
|
||||
|
||||
def has_writer(self):
|
||||
write_states = self.r.hvals(write_state_key(self.name))
|
||||
if len(write_states) <= 1:
|
||||
return True
|
||||
for state in write_states:
|
||||
if not state == "close":
|
||||
return True
|
||||
return False
|
||||
|
||||
def flush(self):
|
||||
keys = [self.name,
|
||||
read_state_key(self.name),
|
||||
write_state_key(self.name)]
|
||||
if not self.is_named:
|
||||
keys = keys + self.r.keys(reference_key_pattern(name=self.name))
|
||||
self.r.delete(*tuple(keys))
|
||||
|
||||
@classmethod
|
||||
def flush_by_pid(cls, pid, host=None):
|
||||
if not host:
|
||||
host = RACK_CTX.proxy_ip
|
||||
|
||||
r = redis.StrictRedis(host=host, port=PORT, db=PIPE)
|
||||
keys = [pid,
|
||||
read_state_key(pid),
|
||||
write_state_key(pid)]
|
||||
keys = keys + r.keys(reference_key_pattern(name=pid))
|
||||
r.delete(*tuple(keys))
|
||||
|
||||
@classmethod
|
||||
def flush_by_name(cls, name, host=None):
|
||||
if not host:
|
||||
host = RACK_CTX.proxy_ip
|
||||
|
||||
r = redis.StrictRedis(host=host, port=PORT, db=FIFO)
|
||||
keys = [name,
|
||||
read_state_key(name),
|
||||
write_state_key(name)]
|
||||
r.delete(*tuple(keys))
|
||||
|
||||
@classmethod
|
||||
def share(cls, ppid, pid, host=None):
|
||||
if not host:
|
||||
host = RACK_CTX.proxy_ip
|
||||
|
||||
now = datetime.now()
|
||||
r = redis.StrictRedis(host=host, port=PORT, db=PIPE)
|
||||
keys = r.keys(reference_key_pattern(pid=ppid))
|
||||
if keys:
|
||||
name = r.get(keys[0])
|
||||
else:
|
||||
if r.keys(read_state_key(ppid)):
|
||||
name = ppid
|
||||
else:
|
||||
return False
|
||||
reference_key = reference_key_pattern(name, pid)
|
||||
r.set(reference_key, name)
|
||||
current_read_state = r.hget(read_state_key(name), name)
|
||||
current_write_state = r.hget(write_state_key(name), name)
|
||||
if not current_read_state == "close":
|
||||
current_read_state = now
|
||||
if not current_write_state == "close":
|
||||
current_write_state = now
|
||||
r.hset(read_state_key(name), pid, current_read_state)
|
||||
r.hset(write_state_key(name), pid, current_write_state)
|
||||
return True
|
@ -1,54 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import redis
|
||||
|
||||
from rackclient.lib import RACK_CTX
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
FIFO = 3
|
||||
PORT = 6379
|
||||
|
||||
|
||||
def get_host():
|
||||
return RACK_CTX.proxy_ip
|
||||
|
||||
|
||||
class Shm(object):
|
||||
def __init__(self):
|
||||
self.host = get_host()
|
||||
self.port = PORT
|
||||
self.r = redis.StrictRedis(host=self.host, port=self.port, db=FIFO)
|
||||
|
||||
def read(self, key):
|
||||
data = self.r.get(key)
|
||||
return data
|
||||
|
||||
def write(self, key, value):
|
||||
return self.r.set(key, value)
|
||||
|
||||
def list_read(self, key):
|
||||
count = self.r.llen(key)
|
||||
return self.r.lrange(key, 0, count)
|
||||
|
||||
def list_write(self, key, value):
|
||||
return self.r.rpush(key, value)
|
||||
|
||||
def list_delete_value(self, key, value):
|
||||
return self.r.lrem(key, 1, value)
|
||||
|
||||
def delete(self, key):
|
||||
return self.r.delete(key)
|
@ -1,66 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import websocket
|
||||
|
||||
from rackclient.lib import RACK_CTX
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
WS_PORT = 8888
|
||||
|
||||
|
||||
class SignalManager(object):
|
||||
def __init__(self, url=None):
|
||||
if url:
|
||||
self.url = url.rstrip('/')
|
||||
else:
|
||||
self.url = "ws://" + ':'.join([RACK_CTX.proxy_ip, str(WS_PORT)])
|
||||
|
||||
def receive(self, on_msg_func, pid=None):
|
||||
self.on_msg_func = on_msg_func
|
||||
if pid:
|
||||
header = 'PID: ' + pid
|
||||
elif getattr(RACK_CTX, 'pid', False):
|
||||
header = 'PID: ' + RACK_CTX.pid
|
||||
else:
|
||||
raise Exception("Target PID is required.")
|
||||
wsapp = websocket.WebSocketApp(
|
||||
url=self.url + '/receive',
|
||||
header=[header],
|
||||
on_message=self.on_message,
|
||||
on_error=self.on_error,
|
||||
on_close=self.on_close)
|
||||
LOG.debug("Started to wait for messages.")
|
||||
wsapp.run_forever()
|
||||
|
||||
def on_message(self, ws, message):
|
||||
LOG.debug("Received a message: %s" % message)
|
||||
if self.on_msg_func(message):
|
||||
ws.close()
|
||||
|
||||
def on_error(self, ws, error):
|
||||
LOG.error(error)
|
||||
ws.close()
|
||||
raise Exception("Error ocurred while waiting for messages.")
|
||||
|
||||
def on_close(self, ws):
|
||||
LOG.debug("Websocket connection %s closed" % ws.header[0])
|
||||
|
||||
def send(self, target_id, message):
|
||||
ws = websocket.create_connection(self.url + '/send',
|
||||
header=['PID: ' + target_id])
|
||||
LOG.debug("Send a message: %s" % message)
|
||||
ws.send(message)
|
||||
ws.close()
|
@ -1,146 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import Queue
|
||||
import threading
|
||||
|
||||
from rackclient import exceptions
|
||||
from rackclient.v1 import processes
|
||||
from rackclient.lib import RACK_CTX
|
||||
from rackclient.lib.syscall.default import messaging
|
||||
from rackclient.lib.syscall.default import pipe as rackpipe
|
||||
from rackclient.lib.syscall.default import file as rackfile
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def fork(opt_list, timeout_limit=180):
|
||||
LOG.debug("start fork")
|
||||
LOG.debug("fork create processes count: %s", len(opt_list))
|
||||
|
||||
return_process_list = []
|
||||
while True:
|
||||
try:
|
||||
child_list = _bulk_fork(RACK_CTX.pid, opt_list)
|
||||
success_list, error_list = _check_connection(RACK_CTX.pid,
|
||||
child_list,
|
||||
timeout_limit)
|
||||
except Exception as e:
|
||||
raise exceptions.ForkError(e)
|
||||
|
||||
return_process_list += success_list
|
||||
if error_list:
|
||||
opt_list = []
|
||||
for error_process in error_list:
|
||||
args = error_process.args
|
||||
args.pop('gid')
|
||||
args.pop('pid')
|
||||
args.pop('ppid')
|
||||
args.pop('proxy_ip')
|
||||
opt_list.append(dict(args=args))
|
||||
else:
|
||||
break
|
||||
|
||||
return return_process_list
|
||||
|
||||
|
||||
def _bulk_fork(pid, args_list):
|
||||
LOG.debug("start bulk_fork")
|
||||
q = Queue.Queue()
|
||||
|
||||
def _fork(pid, **kwargs):
|
||||
try:
|
||||
child = RACK_CTX.client.processes.create(gid=RACK_CTX.gid,
|
||||
ppid=pid,
|
||||
**kwargs)
|
||||
q.put(child)
|
||||
except Exception as e:
|
||||
attr = dict(args=kwargs, error=e)
|
||||
q.put(processes.Process(RACK_CTX.client, attr))
|
||||
|
||||
tg = []
|
||||
process_list = []
|
||||
while True:
|
||||
for args in args_list:
|
||||
t = threading.Thread(target=_fork, args=(pid,), kwargs=args)
|
||||
t.start()
|
||||
tg.append(t)
|
||||
|
||||
for t in tg:
|
||||
t.join()
|
||||
|
||||
args_list = []
|
||||
success_processes = []
|
||||
for i in range(q.qsize()):
|
||||
process = q.get()
|
||||
if hasattr(process, "error"):
|
||||
args_list.append(process.args)
|
||||
else:
|
||||
success_processes.append(process)
|
||||
|
||||
process_list += success_processes
|
||||
LOG.debug("bulk_fork success processes count: %s", len(process_list))
|
||||
if not success_processes:
|
||||
msg = "No child process is created."
|
||||
raise Exception(msg)
|
||||
elif not args_list:
|
||||
break
|
||||
return process_list
|
||||
|
||||
|
||||
def _check_connection(pid, process_list, timeout):
|
||||
LOG.debug("start check_connection")
|
||||
msg = messaging.Messaging()
|
||||
msg_list = msg.receive_all_msg(timeout_limit=timeout,
|
||||
msg_limit_count=len(process_list))
|
||||
|
||||
pid_list = []
|
||||
for message in msg_list:
|
||||
if message.get('pid'):
|
||||
pid_list.append(message.get('pid'))
|
||||
|
||||
actives = []
|
||||
inactives = []
|
||||
for process in process_list:
|
||||
if pid_list and process.pid in pid_list:
|
||||
rackpipe.Pipe.share(pid, process.pid)
|
||||
msg.send_msg(target=process.pid, message="start")
|
||||
actives.append(process)
|
||||
pid_list.remove(process.pid)
|
||||
else:
|
||||
RACK_CTX.client.processes.delete(RACK_CTX.gid, process.pid)
|
||||
inactives.append(process)
|
||||
|
||||
LOG.debug("_check_connection active processes count: %s", len(actives))
|
||||
LOG.debug("_check_connection inactive processes count: %s", len(inactives))
|
||||
|
||||
if not actives:
|
||||
msg = "No child process is active."
|
||||
raise Exception(msg)
|
||||
|
||||
return actives, inactives
|
||||
|
||||
|
||||
def kill(pid):
|
||||
RACK_CTX.client.processes.delete(RACK_CTX.gid, pid)
|
||||
|
||||
|
||||
def pipe(name=None):
|
||||
p = rackpipe.Pipe(name)
|
||||
return p
|
||||
|
||||
|
||||
def fopen(file_path, mode="r"):
|
||||
return rackfile.File(file_path, mode)
|
@ -1,17 +0,0 @@
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
|
||||
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
|
@ -1,221 +0,0 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# Copyright 2013 Spanish National Research Council.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# E0202: An attribute inherited from %s hide this method
|
||||
# pylint: disable=E0202
|
||||
|
||||
import abc
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import six
|
||||
from stevedore import extension
|
||||
|
||||
from rackclient.openstack.common.apiclient import exceptions
|
||||
|
||||
|
||||
_discovered_plugins = {}
|
||||
|
||||
|
||||
def discover_auth_systems():
|
||||
"""Discover the available auth-systems.
|
||||
|
||||
This won't take into account the old style auth-systems.
|
||||
"""
|
||||
global _discovered_plugins
|
||||
_discovered_plugins = {}
|
||||
|
||||
def add_plugin(ext):
|
||||
_discovered_plugins[ext.name] = ext.plugin
|
||||
|
||||
ep_namespace = "rackclient.openstack.common.apiclient.auth"
|
||||
mgr = extension.ExtensionManager(ep_namespace)
|
||||
mgr.map(add_plugin)
|
||||
|
||||
|
||||
def load_auth_system_opts(parser):
|
||||
"""Load options needed by the available auth-systems into a parser.
|
||||
|
||||
This function will try to populate the parser with options from the
|
||||
available plugins.
|
||||
"""
|
||||
group = parser.add_argument_group("Common auth options")
|
||||
BaseAuthPlugin.add_common_opts(group)
|
||||
for name, auth_plugin in six.iteritems(_discovered_plugins):
|
||||
group = parser.add_argument_group(
|
||||
"Auth-system '%s' options" % name,
|
||||
conflict_handler="resolve")
|
||||
auth_plugin.add_opts(group)
|
||||
|
||||
|
||||
def load_plugin(auth_system):
|
||||
try:
|
||||
plugin_class = _discovered_plugins[auth_system]
|
||||
except KeyError:
|
||||
raise exceptions.AuthSystemNotFound(auth_system)
|
||||
return plugin_class(auth_system=auth_system)
|
||||
|
||||
|
||||
def load_plugin_from_args(args):
|
||||
"""Load required plugin and populate it with options.
|
||||
|
||||
Try to guess auth system if it is not specified. Systems are tried in
|
||||
alphabetical order.
|
||||
|
||||
:type args: argparse.Namespace
|
||||
:raises: AuthPluginOptionsMissing
|
||||
"""
|
||||
auth_system = args.os_auth_system
|
||||
if auth_system:
|
||||
plugin = load_plugin(auth_system)
|
||||
plugin.parse_opts(args)
|
||||
plugin.sufficient_options()
|
||||
return plugin
|
||||
|
||||
for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)):
|
||||
plugin_class = _discovered_plugins[plugin_auth_system]
|
||||
plugin = plugin_class()
|
||||
plugin.parse_opts(args)
|
||||
try:
|
||||
plugin.sufficient_options()
|
||||
except exceptions.AuthPluginOptionsMissing:
|
||||
continue
|
||||
return plugin
|
||||
raise exceptions.AuthPluginOptionsMissing(["auth_system"])
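Put together, a caller builds an argparse namespace (normally via load_auth_system_opts) and the loader either honours an explicit --os-auth-system or falls back to the first discovered plugin whose options are all present. A hedged sketch of that flow; "keystone" is a hypothetical plugin name that stands in for whatever entry points are actually installed:

import argparse

from rackclient.openstack.common.apiclient import auth   # the module shown above

auth.discover_auth_systems()            # fill _discovered_plugins from entry points
parser = argparse.ArgumentParser()
auth.load_auth_system_opts(parser)      # adds --os-auth-system, --os-username, ...

args = parser.parse_args([
    "--os-auth-system", "keystone",     # hypothetical: depends on installed plugins
    "--os-username", "demo",
    "--os-password", "secret",
    "--os-auth-url", "http://keystone:5000/v2.0",
])
plugin = auth.load_plugin_from_args(args)   # raises AuthPluginOptionsMissing if incomplete
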
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class BaseAuthPlugin(object):
|
||||
"""Base class for authentication plugins.
|
||||
|
||||
An authentication plugin needs to override at least the authenticate
|
||||
method to be a valid plugin.
|
||||
"""
|
||||
|
||||
auth_system = None
|
||||
opt_names = []
|
||||
common_opt_names = [
|
||||
"auth_system",
|
||||
"username",
|
||||
"password",
|
||||
"tenant_name",
|
||||
"token",
|
||||
"auth_url",
|
||||
]
|
||||
|
||||
def __init__(self, auth_system=None, **kwargs):
|
||||
self.auth_system = auth_system or self.auth_system
|
||||
self.opts = dict((name, kwargs.get(name))
|
||||
for name in self.opt_names)
|
||||
|
||||
@staticmethod
|
||||
def _parser_add_opt(parser, opt):
|
||||
"""Add an option to parser in two variants.
|
||||
|
||||
:param opt: option name (with underscores)
|
||||
"""
|
||||
dashed_opt = opt.replace("_", "-")
|
||||
env_var = "OS_%s" % opt.upper()
|
||||
arg_default = os.environ.get(env_var, "")
|
||||
arg_help = "Defaults to env[%s]." % env_var
|
||||
parser.add_argument(
|
||||
"--os-%s" % dashed_opt,
|
||||
metavar="<%s>" % dashed_opt,
|
||||
default=arg_default,
|
||||
help=arg_help)
|
||||
parser.add_argument(
|
||||
"--os_%s" % opt,
|
||||
metavar="<%s>" % dashed_opt,
|
||||
help=argparse.SUPPRESS)
|
||||
|
||||
@classmethod
|
||||
def add_opts(cls, parser):
|
||||
"""Populate the parser with the options for this plugin.
|
||||
"""
|
||||
for opt in cls.opt_names:
|
||||
# use `BaseAuthPlugin.common_opt_names` since it is never
|
||||
# changed in child classes
|
||||
if opt not in BaseAuthPlugin.common_opt_names:
|
||||
cls._parser_add_opt(parser, opt)
|
||||
|
||||
@classmethod
|
||||
def add_common_opts(cls, parser):
|
||||
"""Add options that are common for several plugins.
|
||||
"""
|
||||
for opt in cls.common_opt_names:
|
||||
cls._parser_add_opt(parser, opt)
|
||||
|
||||
@staticmethod
|
||||
def get_opt(opt_name, args):
|
||||
"""Return option name and value.
|
||||
|
||||
:param opt_name: name of the option, e.g., "username"
|
||||
:param args: parsed arguments
|
||||
"""
|
||||
return (opt_name, getattr(args, "os_%s" % opt_name, None))
|
||||
|
||||
def parse_opts(self, args):
|
||||
"""Parse the actual auth-system options if any.
|
||||
|
||||
This method is expected to populate the attribute `self.opts` with a
|
||||
dict containing the options and values needed to make authentication.
|
||||
"""
|
||||
self.opts.update(dict(self.get_opt(opt_name, args)
|
||||
for opt_name in self.opt_names))
|
||||
|
||||
def authenticate(self, http_client):
|
||||
"""Authenticate using plugin defined method.
|
||||
|
||||
The method usually analyses `self.opts` and performs
|
||||
a request to authentication server.
|
||||
|
||||
:param http_client: client object that needs authentication
|
||||
:type http_client: HTTPClient
|
||||
:raises: AuthorizationFailure
|
||||
"""
|
||||
self.sufficient_options()
|
||||
self._do_authenticate(http_client)
|
||||
|
||||
@abc.abstractmethod
|
||||
def _do_authenticate(self, http_client):
|
||||
"""Protected method for authentication.
|
||||
"""
|
||||
|
||||
def sufficient_options(self):
|
||||
"""Check if all required options are present.
|
||||
|
||||
:raises: AuthPluginOptionsMissing
|
||||
"""
|
||||
missing = [opt
|
||||
for opt in self.opt_names
|
||||
if not self.opts.get(opt)]
|
||||
if missing:
|
||||
raise exceptions.AuthPluginOptionsMissing(missing)
|
||||
|
||||
@abc.abstractmethod
|
||||
def token_and_endpoint(self, endpoint_type, service_type):
|
||||
"""Return token and endpoint.
|
||||
|
||||
:param service_type: Service type of the endpoint
|
||||
:type service_type: string
|
||||
:param endpoint_type: Type of endpoint.
|
||||
Possible values: public or publicURL,
|
||||
internal or internalURL,
|
||||
admin or adminURL
|
||||
:type endpoint_type: string
|
||||
:returns: tuple of token and endpoint strings
|
||||
:raises: EndpointException
|
||||
"""
|
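A concrete plugin only has to supply _do_authenticate and token_and_endpoint. The sketch below is illustrative only (a toy token-based plugin, not one the project shipped) and relies solely on the BaseAuthPlugin contract shown above:

class TokenAuthPlugin(BaseAuthPlugin):
    """Toy plugin: the caller already holds a token and knows the endpoint."""

    auth_system = "token"
    opt_names = ["token", "auth_url"]

    def _do_authenticate(self, http_client):
        # Nothing to exchange with an auth server; the token was given up front.
        self.token = self.opts["token"]
        self.endpoint = self.opts["auth_url"]

    def token_and_endpoint(self, endpoint_type, service_type):
        return getattr(self, "token", None), getattr(self, "endpoint", None)


plugin = TokenAuthPlugin(token="secret", auth_url="http://api.example.com/v1")
plugin.sufficient_options()   # would raise AuthPluginOptionsMissing if either opt were unset
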
@ -1,509 +0,0 @@
|
||||
# Copyright 2010 Jacob Kaplan-Moss
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# Copyright 2012 Grid Dynamics
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Base utilities to build API operation managers and objects on top of.
|
||||
"""
|
||||
|
||||
# E1102: %s is not callable
|
||||
# pylint: disable=E1102
|
||||
|
||||
import abc
|
||||
import copy
|
||||
|
||||
import six
|
||||
from six.moves.urllib import parse
|
||||
|
||||
from rackclient.openstack.common.apiclient import exceptions
|
||||
from rackclient.openstack.common.gettextutils import _
|
||||
from rackclient.openstack.common import strutils
|
||||
|
||||
|
||||
def getid(obj):
|
||||
"""Return id if argument is a Resource.
|
||||
|
||||
Abstracts the common pattern of allowing both an object or an object's ID
|
||||
(UUID) as a parameter when dealing with relationships.
|
||||
"""
|
||||
try:
|
||||
if obj.uuid:
|
||||
return obj.uuid
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
return obj.id
|
||||
except AttributeError:
|
||||
return obj
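For example, either a resource-like object or a bare ID can be passed (the class below is only an illustration):

class _Server(object):
    id = "a1b2c3"

getid(_Server())    # -> "a1b2c3"
getid("a1b2c3")     # plain IDs pass through unchanged
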
|
||||
|
||||
|
||||
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
|
||||
class HookableMixin(object):
|
||||
"""Mixin so classes can register and run hooks."""
|
||||
_hooks_map = {}
|
||||
|
||||
@classmethod
|
||||
def add_hook(cls, hook_type, hook_func):
|
||||
"""Add a new hook of specified type.
|
||||
|
||||
:param cls: class that registers hooks
|
||||
:param hook_type: hook type, e.g., '__pre_parse_args__'
|
||||
:param hook_func: hook function
|
||||
"""
|
||||
if hook_type not in cls._hooks_map:
|
||||
cls._hooks_map[hook_type] = []
|
||||
|
||||
cls._hooks_map[hook_type].append(hook_func)
|
||||
|
||||
@classmethod
|
||||
def run_hooks(cls, hook_type, *args, **kwargs):
|
||||
"""Run all hooks of specified type.
|
||||
|
||||
:param cls: class that registers hooks
|
||||
:param hook_type: hook type, e.g., '__pre_parse_args__'
|
||||
:param args: args to be passed to every hook function
|
||||
:param kwargs: kwargs to be passed to every hook function
|
||||
"""
|
||||
hook_funcs = cls._hooks_map.get(hook_type) or []
|
||||
for hook_func in hook_funcs:
|
||||
hook_func(*args, **kwargs)
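For instance, an extension can attach a callback to a hook name and have it run whenever that hook fires; a minimal illustration using the mixin above (the manager class and hook payload are made up for the example):

class ServerManager(HookableMixin):
    pass


def log_args(argv):
    print("about to parse: %s" % (argv,))


ServerManager.add_hook('__pre_parse_args__', log_args)
ServerManager.run_hooks('__pre_parse_args__', ['server-list', '--debug'])
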
|
||||
|
||||
|
||||
class BaseManager(HookableMixin):
|
||||
"""Basic manager type providing common operations.
|
||||
|
||||
Managers interact with a particular type of API (servers, flavors, images,
|
||||
etc.) and provide CRUD operations for them.
|
||||
"""
|
||||
resource_class = None
|
||||
|
||||
def __init__(self, client):
|
||||
"""Initializes BaseManager with `client`.
|
||||
|
||||
:param client: instance of BaseClient descendant for HTTP requests
|
||||
"""
|
||||
super(BaseManager, self).__init__()
|
||||
self.client = client
|
||||
|
||||
def _list(self, url, response_key, obj_class=None, json=None):
|
||||
"""List the collection.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers'
|
||||
:param response_key: the key to be looked up in response dictionary,
|
||||
e.g., 'servers'
|
||||
:param obj_class: class for constructing the returned objects
|
||||
(self.resource_class will be used by default)
|
||||
:param json: data that will be encoded as JSON and passed in POST
|
||||
request (GET will be sent by default)
|
||||
"""
|
||||
if json:
|
||||
body = self.client.post(url, json=json).json()
|
||||
else:
|
||||
body = self.client.get(url).json()
|
||||
|
||||
if obj_class is None:
|
||||
obj_class = self.resource_class
|
||||
|
||||
data = body[response_key]
|
||||
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
|
||||
# unlike other services which just return the list...
|
||||
try:
|
||||
data = data['values']
|
||||
except (KeyError, TypeError):
|
||||
pass
|
||||
|
||||
return [obj_class(self, res, loaded=True) for res in data if res]
|
||||
|
||||
def _get(self, url, response_key):
|
||||
"""Get an object from collection.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers'
|
||||
:param response_key: the key to be looked up in response dictionary,
|
||||
e.g., 'server'
|
||||
"""
|
||||
body = self.client.get(url).json()
|
||||
return self.resource_class(self, body[response_key], loaded=True)
|
||||
|
||||
def _head(self, url):
|
||||
"""Retrieve request headers for an object.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers'
|
||||
"""
|
||||
resp = self.client.head(url)
|
||||
return resp.status_code == 204
|
||||
|
||||
def _post(self, url, json, response_key, return_raw=False):
|
||||
"""Create an object.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers'
|
||||
:param json: data that will be encoded as JSON and passed in POST
|
||||
request (GET will be sent by default)
|
||||
:param response_key: the key to be looked up in response dictionary,
|
||||
e.g., 'servers'
|
||||
:param return_raw: flag to force returning raw JSON instead of
|
||||
Python object of self.resource_class
|
||||
"""
|
||||
body = self.client.post(url, json=json).json()
|
||||
if return_raw:
|
||||
return body[response_key]
|
||||
return self.resource_class(self, body[response_key])
|
||||
|
||||
def _put(self, url, json=None, response_key=None):
|
||||
"""Update an object with PUT method.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers'
|
||||
:param json: data that will be encoded as JSON and passed in POST
|
||||
request (GET will be sent by default)
|
||||
:param response_key: the key to be looked up in response dictionary,
|
||||
e.g., 'servers'
|
||||
"""
|
||||
resp = self.client.put(url, json=json)
|
||||
# PUT requests may not return a body
|
||||
if resp.content:
|
||||
body = resp.json()
|
||||
if response_key is not None:
|
||||
return self.resource_class(self, body[response_key])
|
||||
else:
|
||||
return self.resource_class(self, body)
|
||||
|
||||
def _patch(self, url, json=None, response_key=None):
|
||||
"""Update an object with PATCH method.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers'
|
||||
:param json: data that will be encoded as JSON and passed in POST
|
||||
request (GET will be sent by default)
|
||||
:param response_key: the key to be looked up in response dictionary,
|
||||
e.g., 'servers'
|
||||
"""
|
||||
body = self.client.patch(url, json=json).json()
|
||||
if response_key is not None:
|
||||
return self.resource_class(self, body[response_key])
|
||||
else:
|
||||
return self.resource_class(self, body)
|
||||
|
||||
def _delete(self, url):
|
||||
"""Delete an object.
|
||||
|
||||
:param url: a partial URL, e.g., '/servers/my-server'
|
||||
"""
|
||||
return self.client.delete(url)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class ManagerWithFind(BaseManager):
|
||||
"""Manager with additional `find()`/`findall()` methods."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def list(self):
|
||||
pass
|
||||
|
||||
def find(self, **kwargs):
|
||||
"""Find a single item with attributes matching ``**kwargs``.
|
||||
|
||||
This isn't very efficient: it loads the entire list then filters on
|
||||
the Python side.
|
||||
"""
|
||||
matches = self.findall(**kwargs)
|
||||
num_matches = len(matches)
|
||||
if num_matches == 0:
|
||||
msg = _("No %(name)s matching %(args)s.") % {
|
||||
'name': self.resource_class.__name__,
|
||||
'args': kwargs
|
||||
}
|
||||
raise exceptions.NotFound(msg)
|
||||
elif num_matches > 1:
|
||||
raise exceptions.NoUniqueMatch()
|
||||
else:
|
||||
return matches[0]
|
||||
|
||||
def findall(self, **kwargs):
|
||||
"""Find all items with attributes matching ``**kwargs``.
|
||||
|
||||
This isn't very efficient: it loads the entire list then filters on
|
||||
the Python side.
|
||||
"""
|
||||
found = []
|
||||
searches = kwargs.items()
|
||||
|
||||
for obj in self.list():
|
||||
try:
|
||||
if all(getattr(obj, attr) == value
|
||||
for (attr, value) in searches):
|
||||
found.append(obj)
|
||||
except AttributeError:
|
||||
continue
|
||||
|
||||
return found
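Since both helpers filter on the client side, they are fine for short listings but will pull the whole collection for large ones. A self-contained usage sketch with canned data (the FlavorManager and its attributes are illustrative, not a real rackclient manager):

class FlavorManager(ManagerWithFind):
    resource_class = Resource

    def list(self):
        # normally self._list('/flavors', 'flavors'); canned data keeps the sketch offline
        return [Resource(self, {'id': 1, 'name': 'small', 'ram': 512}, loaded=True),
                Resource(self, {'id': 2, 'name': 'large', 'ram': 8192}, loaded=True)]


flavors = FlavorManager(client=None)
small = flavors.find(name='small')       # exactly one match, else NotFound/NoUniqueMatch
big_ones = flavors.findall(ram=8192)     # possibly empty list
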
|
||||
|
||||
|
||||
class CrudManager(BaseManager):
|
||||
"""Base manager class for manipulating entities.
|
||||
|
||||
Children of this class are expected to define a `collection_key` and `key`.
|
||||
|
||||
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
|
||||
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
|
||||
objects containing a list of member resources (e.g. `{'entities': [{},
|
||||
{}, {}]}`).
|
||||
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
|
||||
refer to an individual member of the collection.
|
||||
|
||||
"""
|
||||
collection_key = None
|
||||
key = None
|
||||
|
||||
def build_url(self, base_url=None, **kwargs):
|
||||
"""Builds a resource URL for the given kwargs.
|
||||
|
||||
Given an example collection where `collection_key = 'entities'` and
|
||||
`key = 'entity'`, the following URL's could be generated.
|
||||
|
||||
By default, the URL will represent a collection of entities, e.g.::
|
||||
|
||||
/entities
|
||||
|
||||
If kwargs contains an `entity_id`, then the URL will represent a
|
||||
specific member, e.g.::
|
||||
|
||||
/entities/{entity_id}
|
||||
|
||||
:param base_url: if provided, the generated URL will be appended to it
|
||||
"""
|
||||
url = base_url if base_url is not None else ''
|
||||
|
||||
url += '/%s' % self.collection_key
|
||||
|
||||
# do we have a specific entity?
|
||||
entity_id = kwargs.get('%s_id' % self.key)
|
||||
if entity_id is not None:
|
||||
url += '/%s' % entity_id
|
||||
|
||||
return url
|
||||
|
||||
def _filter_kwargs(self, kwargs):
|
||||
"""Drop null values and handle ids."""
|
||||
for key, ref in six.iteritems(kwargs.copy()):
|
||||
if ref is None:
|
||||
kwargs.pop(key)
|
||||
else:
|
||||
if isinstance(ref, Resource):
|
||||
kwargs.pop(key)
|
||||
kwargs['%s_id' % key] = getid(ref)
|
||||
return kwargs
|
||||
|
||||
def create(self, **kwargs):
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
return self._post(
|
||||
self.build_url(**kwargs),
|
||||
{self.key: kwargs},
|
||||
self.key)
|
||||
|
||||
def get(self, **kwargs):
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
return self._get(
|
||||
self.build_url(**kwargs),
|
||||
self.key)
|
||||
|
||||
def head(self, **kwargs):
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
return self._head(self.build_url(**kwargs))
|
||||
|
||||
def list(self, base_url=None, **kwargs):
|
||||
"""List the collection.
|
||||
|
||||
:param base_url: if provided, the generated URL will be appended to it
|
||||
"""
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
|
||||
return self._list(
|
||||
'%(base_url)s%(query)s' % {
|
||||
'base_url': self.build_url(base_url=base_url, **kwargs),
|
||||
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
|
||||
},
|
||||
self.collection_key)
|
||||
|
||||
def put(self, base_url=None, **kwargs):
|
||||
"""Update an element.
|
||||
|
||||
:param base_url: if provided, the generated URL will be appended to it
|
||||
"""
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
|
||||
return self._put(self.build_url(base_url=base_url, **kwargs))
|
||||
|
||||
def update(self, **kwargs):
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
params = kwargs.copy()
|
||||
params.pop('%s_id' % self.key)
|
||||
|
||||
return self._patch(
|
||||
self.build_url(**kwargs),
|
||||
{self.key: params},
|
||||
self.key)
|
||||
|
||||
def delete(self, **kwargs):
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
|
||||
return self._delete(
|
||||
self.build_url(**kwargs))
|
||||
|
||||
def find(self, base_url=None, **kwargs):
|
||||
"""Find a single item with attributes matching ``**kwargs``.
|
||||
|
||||
:param base_url: if provided, the generated URL will be appended to it
|
||||
"""
|
||||
kwargs = self._filter_kwargs(kwargs)
|
||||
|
||||
rl = self._list(
|
||||
'%(base_url)s%(query)s' % {
|
||||
'base_url': self.build_url(base_url=base_url, **kwargs),
|
||||
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
|
||||
},
|
||||
self.collection_key)
|
||||
num = len(rl)
|
||||
|
||||
if num == 0:
|
||||
msg = _("No %(name)s matching %(args)s.") % {
|
||||
'name': self.resource_class.__name__,
|
||||
'args': kwargs
|
||||
}
|
||||
raise exceptions.NotFound(404, msg)
|
||||
elif num > 1:
|
||||
raise exceptions.NoUniqueMatch
|
||||
else:
|
||||
return rl[0]
|
||||
|
||||
|
||||
class Extension(HookableMixin):
|
||||
"""Extension descriptor."""
|
||||
|
||||
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
|
||||
manager_class = None
|
||||
|
||||
def __init__(self, name, module):
|
||||
super(Extension, self).__init__()
|
||||
self.name = name
|
||||
self.module = module
|
||||
self._parse_extension_module()
|
||||
|
||||
def _parse_extension_module(self):
|
||||
self.manager_class = None
|
||||
for attr_name, attr_value in self.module.__dict__.items():
|
||||
if attr_name in self.SUPPORTED_HOOKS:
|
||||
self.add_hook(attr_name, attr_value)
|
||||
else:
|
||||
try:
|
||||
if issubclass(attr_value, BaseManager):
|
||||
self.manager_class = attr_value
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
def __repr__(self):
|
||||
return "<Extension '%s'>" % self.name
|
||||
|
||||
|
||||
class Resource(object):
|
||||
"""Base class for OpenStack resources (tenant, user, etc.).
|
||||
|
||||
This is pretty much just a bag for attributes.
|
||||
"""
|
||||
|
||||
HUMAN_ID = False
|
||||
NAME_ATTR = 'name'
|
||||
|
||||
def __init__(self, manager, info, loaded=False):
|
||||
"""Populate and bind to a manager.
|
||||
|
||||
:param manager: BaseManager object
|
||||
:param info: dictionary representing resource attributes
|
||||
:param loaded: prevent lazy-loading if set to True
|
||||
"""
|
||||
self.manager = manager
|
||||
self._info = info
|
||||
self._add_details(info)
|
||||
self._loaded = loaded
|
||||
|
||||
def __repr__(self):
|
||||
reprkeys = sorted(k
|
||||
for k in self.__dict__.keys()
|
||||
if k[0] != '_' and k != 'manager')
|
||||
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
|
||||
return "<%s %s>" % (self.__class__.__name__, info)
|
||||
|
||||
@property
|
||||
def human_id(self):
|
||||
"""Human-readable ID which can be used for bash completion.
|
||||
"""
|
||||
if self.HUMAN_ID:
|
||||
name = getattr(self, self.NAME_ATTR, None)
|
||||
if name is not None:
|
||||
return strutils.to_slug(name)
|
||||
return None
|
||||
|
||||
def _add_details(self, info):
|
||||
for (k, v) in six.iteritems(info):
|
||||
try:
|
||||
setattr(self, k, v)
|
||||
self._info[k] = v
|
||||
except AttributeError:
|
||||
# In this case we already defined the attribute on the class
|
||||
pass
|
||||
|
||||
def __getattr__(self, k):
|
||||
if k not in self.__dict__:
|
||||
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
|
||||
if not self.is_loaded():
|
||||
self.get()
|
||||
return self.__getattr__(k)
|
||||
|
||||
raise AttributeError(k)
|
||||
else:
|
||||
return self.__dict__[k]
|
||||
|
||||
def get(self):
|
||||
"""Support for lazy loading details.
|
||||
|
||||
Some clients, such as novaclient, can lazy-load resource details; calling
this method fetches those details on demand.
|
||||
"""
|
||||
# set_loaded() first ... so if we have to bail, we know we tried.
|
||||
self.set_loaded(True)
|
||||
if not hasattr(self.manager, 'get'):
|
||||
return
|
||||
|
||||
new = self.manager.get(self.id)
|
||||
if new:
|
||||
self._add_details(new._info)
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, Resource):
|
||||
return NotImplemented
|
||||
# two resources of different types are not equal
|
||||
if not isinstance(other, self.__class__):
|
||||
return False
|
||||
if hasattr(self, 'id') and hasattr(other, 'id'):
|
||||
return self.id == other.id
|
||||
return self._info == other._info
|
||||
|
||||
def is_loaded(self):
|
||||
return self._loaded
|
||||
|
||||
def set_loaded(self, val):
|
||||
self._loaded = val
|
||||
|
||||
def to_dict(self):
|
||||
return copy.deepcopy(self._info)
|
@ -1,363 +0,0 @@
|
||||
# Copyright 2010 Jacob Kaplan-Moss
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# Copyright 2011 Piston Cloud Computing, Inc.
|
||||
# Copyright 2013 Alessio Ababilov
|
||||
# Copyright 2013 Grid Dynamics
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
OpenStack Client interface. Handles the REST calls and responses.
|
||||
"""
|
||||
|
||||
# E0202: An attribute inherited from %s hide this method
|
||||
# pylint: disable=E0202
|
||||
|
||||
import logging
|
||||
import time
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
import requests
|
||||
|
||||
from rackclient.openstack.common.apiclient import exceptions
|
||||
from rackclient.openstack.common.gettextutils import _
|
||||
from rackclient.openstack.common import importutils
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HTTPClient(object):
|
||||
"""This client handles sending HTTP requests to OpenStack servers.
|
||||
|
||||
Features:
|
||||
|
||||
- share authentication information between several clients to different
|
||||
services (e.g., for compute and image clients);
|
||||
- reissue authentication request for expired tokens;
|
||||
- encode/decode JSON bodies;
|
||||
- raise exceptions on HTTP errors;
|
||||
- pluggable authentication;
|
||||
- store authentication information in a keyring;
|
||||
- store time spent for requests;
|
||||
- register clients for particular services, so one can use
|
||||
`http_client.identity` or `http_client.compute`;
|
||||
- log requests and responses in a format that is easy to copy-and-paste
|
||||
into terminal and send the same request with curl.
|
||||
"""
|
||||
|
||||
user_agent = "rackclient.openstack.common.apiclient"
|
||||
|
||||
def __init__(self,
|
||||
auth_plugin,
|
||||
region_name=None,
|
||||
endpoint_type="publicURL",
|
||||
original_ip=None,
|
||||
verify=True,
|
||||
cert=None,
|
||||
timeout=None,
|
||||
timings=False,
|
||||
keyring_saver=None,
|
||||
debug=False,
|
||||
user_agent=None,
|
||||
http=None):
|
||||
self.auth_plugin = auth_plugin
|
||||
|
||||
self.endpoint_type = endpoint_type
|
||||
self.region_name = region_name
|
||||
|
||||
self.original_ip = original_ip
|
||||
self.timeout = timeout
|
||||
self.verify = verify
|
||||
self.cert = cert
|
||||
|
||||
self.keyring_saver = keyring_saver
|
||||
self.debug = debug
|
||||
self.user_agent = user_agent or self.user_agent
|
||||
|
||||
self.times = [] # [("item", starttime, endtime), ...]
|
||||
self.timings = timings
|
||||
|
||||
# requests within the same session can reuse TCP connections from pool
|
||||
self.http = http or requests.Session()
|
||||
|
||||
self.cached_token = None
|
||||
|
||||
def _http_log_req(self, method, url, kwargs):
|
||||
if not self.debug:
|
||||
return
|
||||
|
||||
string_parts = [
|
||||
"curl -i",
|
||||
"-X '%s'" % method,
|
||||
"'%s'" % url,
|
||||
]
|
||||
|
||||
for element in kwargs['headers']:
|
||||
header = "-H '%s: %s'" % (element, kwargs['headers'][element])
|
||||
string_parts.append(header)
|
||||
|
||||
_logger.debug("REQ: %s" % " ".join(string_parts))
|
||||
if 'data' in kwargs:
|
||||
_logger.debug("REQ BODY: %s\n" % (kwargs['data']))
|
||||
|
||||
def _http_log_resp(self, resp):
|
||||
if not self.debug:
|
||||
return
|
||||
_logger.debug(
|
||||
"RESP: [%s] %s\n",
|
||||
resp.status_code,
|
||||
resp.headers)
|
||||
if resp._content_consumed:
|
||||
_logger.debug(
|
||||
"RESP BODY: %s\n",
|
||||
resp.text)
|
||||
|
||||
def serialize(self, kwargs):
|
||||
if kwargs.get('json') is not None:
|
||||
kwargs['headers']['Content-Type'] = 'application/json'
|
||||
kwargs['data'] = json.dumps(kwargs['json'])
|
||||
try:
|
||||
del kwargs['json']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def get_timings(self):
|
||||
return self.times
|
||||
|
||||
def reset_timings(self):
|
||||
self.times = []
|
||||
|
||||
def request(self, method, url, **kwargs):
|
||||
"""Send an http request with the specified characteristics.
|
||||
|
||||
Wrapper around `requests.Session.request` to handle tasks such as
|
||||
setting headers, JSON encoding/decoding, and error handling.
|
||||
|
||||
:param method: method of HTTP request
|
||||
:param url: URL of HTTP request
|
||||
:param kwargs: any other parameter that can be passed to
|
||||
requests.Session.request (such as `headers`) or `json`
|
||||
that will be encoded as JSON and used as `data` argument
|
||||
"""
|
||||
kwargs.setdefault("headers", kwargs.get("headers", {}))
|
||||
kwargs["headers"]["User-Agent"] = self.user_agent
|
||||
if self.original_ip:
|
||||
kwargs["headers"]["Forwarded"] = "for=%s;by=%s" % (
|
||||
self.original_ip, self.user_agent)
|
||||
if self.timeout is not None:
|
||||
kwargs.setdefault("timeout", self.timeout)
|
||||
kwargs.setdefault("verify", self.verify)
|
||||
if self.cert is not None:
|
||||
kwargs.setdefault("cert", self.cert)
|
||||
self.serialize(kwargs)
|
||||
|
||||
self._http_log_req(method, url, kwargs)
|
||||
if self.timings:
|
||||
start_time = time.time()
|
||||
resp = self.http.request(method, url, **kwargs)
|
||||
if self.timings:
|
||||
self.times.append(("%s %s" % (method, url),
|
||||
start_time, time.time()))
|
||||
self._http_log_resp(resp)
|
||||
|
||||
if resp.status_code >= 400:
|
||||
_logger.debug(
|
||||
"Request returned failure status: %s",
|
||||
resp.status_code)
|
||||
raise exceptions.from_response(resp, method, url)
|
||||
|
||||
return resp
|
||||
|
||||
@staticmethod
|
||||
def concat_url(endpoint, url):
|
||||
"""Concatenate endpoint and final URL.
|
||||
|
||||
E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
|
||||
"http://keystone/v2.0/tokens".
|
||||
|
||||
:param endpoint: the base URL
|
||||
:param url: the final URL
|
||||
"""
|
||||
return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))
|
||||
|
||||
def client_request(self, client, method, url, **kwargs):
|
||||
"""Send an http request using `client`'s endpoint and specified `url`.
|
||||
|
||||
If request was rejected as unauthorized (possibly because the token is
|
||||
expired), issue one authorization attempt and send the request once
|
||||
again.
|
||||
|
||||
:param client: instance of BaseClient descendant
|
||||
:param method: method of HTTP request
|
||||
:param url: URL of HTTP request
|
||||
:param kwargs: any other parameter that can be passed to
|
||||
`HTTPClient.request`
|
||||
"""
|
||||
|
||||
filter_args = {
|
||||
"endpoint_type": client.endpoint_type or self.endpoint_type,
|
||||
"service_type": client.service_type,
|
||||
}
|
||||
token, endpoint = (self.cached_token, client.cached_endpoint)
|
||||
just_authenticated = False
|
||||
if not (token and endpoint):
|
||||
try:
|
||||
token, endpoint = self.auth_plugin.token_and_endpoint(
|
||||
**filter_args)
|
||||
except exceptions.EndpointException:
|
||||
pass
|
||||
if not (token and endpoint):
|
||||
self.authenticate()
|
||||
just_authenticated = True
|
||||
token, endpoint = self.auth_plugin.token_and_endpoint(
|
||||
**filter_args)
|
||||
if not (token and endpoint):
|
||||
raise exceptions.AuthorizationFailure(
|
||||
_("Cannot find endpoint or token for request"))
|
||||
|
||||
old_token_endpoint = (token, endpoint)
|
||||
kwargs.setdefault("headers", {})["X-Auth-Token"] = token
|
||||
self.cached_token = token
|
||||
client.cached_endpoint = endpoint
|
||||
# Perform the request once. If we get Unauthorized, then it
|
||||
# might be because the auth token expired, so try to
|
||||
# re-authenticate and try again. If it still fails, bail.
|
||||
try:
|
||||
return self.request(
|
||||
method, self.concat_url(endpoint, url), **kwargs)
|
||||
except exceptions.Unauthorized as unauth_ex:
|
||||
if just_authenticated:
|
||||
raise
|
||||
self.cached_token = None
|
||||
client.cached_endpoint = None
|
||||
self.authenticate()
|
||||
try:
|
||||
token, endpoint = self.auth_plugin.token_and_endpoint(
|
||||
**filter_args)
|
||||
except exceptions.EndpointException:
|
||||
raise unauth_ex
|
||||
if (not (token and endpoint) or
|
||||
old_token_endpoint == (token, endpoint)):
|
||||
raise unauth_ex
|
||||
self.cached_token = token
|
||||
client.cached_endpoint = endpoint
|
||||
kwargs["headers"]["X-Auth-Token"] = token
|
||||
return self.request(
|
||||
method, self.concat_url(endpoint, url), **kwargs)
|
||||
|
||||
def add_client(self, base_client_instance):
|
||||
"""Add a new instance of :class:`BaseClient` descendant.
|
||||
|
||||
`self` will store a reference to `base_client_instance`.
|
||||
|
||||
Example:
|
||||
|
||||
>>> def test_clients():
|
||||
... from keystoneclient.auth import keystone
|
||||
... from openstack.common.apiclient import client
|
||||
... auth = keystone.KeystoneAuthPlugin(
|
||||
... username="user", password="pass", tenant_name="tenant",
|
||||
... auth_url="http://auth:5000/v2.0")
|
||||
... openstack_client = client.HTTPClient(auth)
|
||||
... # create nova client
|
||||
... from novaclient.v1_1 import client
|
||||
... client.Client(openstack_client)
|
||||
... # create keystone client
|
||||
... from keystoneclient.v2_0 import client
|
||||
... client.Client(openstack_client)
|
||||
... # use them
|
||||
... openstack_client.identity.tenants.list()
|
||||
... openstack_client.compute.servers.list()
|
||||
"""
|
||||
service_type = base_client_instance.service_type
|
||||
if service_type and not hasattr(self, service_type):
|
||||
setattr(self, service_type, base_client_instance)
|
||||
|
||||
def authenticate(self):
|
||||
self.auth_plugin.authenticate(self)
|
||||
# Store the authentication results in the keyring for later requests
|
||||
if self.keyring_saver:
|
||||
self.keyring_saver.save(self)
|
||||
|
||||
|
||||
class BaseClient(object):
|
||||
"""Top-level object to access the OpenStack API.
|
||||
|
||||
This client uses :class:`HTTPClient` to send requests. :class:`HTTPClient`
|
||||
will handle a bunch of issues such as authentication.
|
||||
"""
|
||||
|
||||
service_type = None
|
||||
endpoint_type = None # "publicURL" will be used
|
||||
cached_endpoint = None
|
||||
|
||||
def __init__(self, http_client, extensions=None):
|
||||
self.http_client = http_client
|
||||
http_client.add_client(self)
|
||||
|
||||
# Add in any extensions...
|
||||
if extensions:
|
||||
for extension in extensions:
|
||||
if extension.manager_class:
|
||||
setattr(self, extension.name,
|
||||
extension.manager_class(self))
|
||||
|
||||
def client_request(self, method, url, **kwargs):
|
||||
return self.http_client.client_request(
|
||||
self, method, url, **kwargs)
|
||||
|
||||
def head(self, url, **kwargs):
|
||||
return self.client_request("HEAD", url, **kwargs)
|
||||
|
||||
def get(self, url, **kwargs):
|
||||
return self.client_request("GET", url, **kwargs)
|
||||
|
||||
def post(self, url, **kwargs):
|
||||
return self.client_request("POST", url, **kwargs)
|
||||
|
||||
def put(self, url, **kwargs):
|
||||
return self.client_request("PUT", url, **kwargs)
|
||||
|
||||
def delete(self, url, **kwargs):
|
||||
return self.client_request("DELETE", url, **kwargs)
|
||||
|
||||
def patch(self, url, **kwargs):
|
||||
return self.client_request("PATCH", url, **kwargs)
|
||||
|
||||
@staticmethod
|
||||
def get_class(api_name, version, version_map):
|
||||
"""Returns the client class for the requested API version
|
||||
|
||||
:param api_name: the name of the API, e.g. 'compute', 'image', etc
|
||||
:param version: the requested API version
|
||||
:param version_map: a dict of client classes keyed by version
|
||||
:rtype: a client class for the requested API version
|
||||
"""
|
||||
try:
|
||||
client_path = version_map[str(version)]
|
||||
except (KeyError, ValueError):
|
||||
msg = _("Invalid %(api_name)s client version '%(version)s'. "
|
||||
"Must be one of: %(version_map)s") % {
|
||||
'api_name': api_name,
|
||||
'version': version,
|
||||
'version_map': ', '.join(version_map.keys())}
|
||||
raise exceptions.UnsupportedVersion(msg)
|
||||
|
||||
return importutils.import_class(client_path)
|
@ -1,466 +0,0 @@
|
||||
# Copyright 2010 Jacob Kaplan-Moss
|
||||
# Copyright 2011 Nebula, Inc.
|
||||
# Copyright 2013 Alessio Ababilov
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Exception definitions.
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import sys
|
||||
|
||||
import six
|
||||
|
||||
from rackclient.openstack.common.gettextutils import _
|
||||
|
||||
|
||||
class ClientException(Exception):
|
||||
"""The base exception class for all exceptions this library raises.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class MissingArgs(ClientException):
|
||||
"""Supplied arguments are not sufficient for calling a function."""
|
||||
def __init__(self, missing):
|
||||
self.missing = missing
|
||||
msg = _("Missing arguments: %s") % ", ".join(missing)
|
||||
super(MissingArgs, self).__init__(msg)
|
||||
|
||||
|
||||
class ValidationError(ClientException):
|
||||
"""Error in validation on API client side."""
|
||||
pass
|
||||
|
||||
|
||||
class UnsupportedVersion(ClientException):
|
||||
"""User is trying to use an unsupported version of the API."""
|
||||
pass
|
||||
|
||||
|
||||
class CommandError(ClientException):
|
||||
"""Error in CLI tool."""
|
||||
pass
|
||||
|
||||
|
||||
class AuthorizationFailure(ClientException):
|
||||
"""Cannot authorize API client."""
|
||||
pass
|
||||
|
||||
|
||||
class ConnectionRefused(ClientException):
|
||||
"""Cannot connect to API service."""
|
||||
pass
|
||||
|
||||
|
||||
class AuthPluginOptionsMissing(AuthorizationFailure):
|
||||
"""Auth plugin misses some options."""
|
||||
def __init__(self, opt_names):
|
||||
super(AuthPluginOptionsMissing, self).__init__(
|
||||
_("Authentication failed. Missing options: %s") %
|
||||
", ".join(opt_names))
|
||||
self.opt_names = opt_names
|
||||
|
||||
|
||||
class AuthSystemNotFound(AuthorizationFailure):
|
||||
"""User has specified an AuthSystem that is not installed."""
|
||||
def __init__(self, auth_system):
|
||||
super(AuthSystemNotFound, self).__init__(
|
||||
_("AuthSystemNotFound: %s") % repr(auth_system))
|
||||
self.auth_system = auth_system
|
||||
|
||||
|
||||
class NoUniqueMatch(ClientException):
|
||||
"""Multiple entities found instead of one."""
|
||||
pass
|
||||
|
||||
|
||||
class EndpointException(ClientException):
|
||||
"""Something is rotten in Service Catalog."""
|
||||
pass
|
||||
|
||||
|
||||
class EndpointNotFound(EndpointException):
|
||||
"""Could not find requested endpoint in Service Catalog."""
|
||||
pass
|
||||
|
||||
|
||||
class AmbiguousEndpoints(EndpointException):
|
||||
"""Found more than one matching endpoint in Service Catalog."""
|
||||
def __init__(self, endpoints=None):
|
||||
super(AmbiguousEndpoints, self).__init__(
|
||||
_("AmbiguousEndpoints: %s") % repr(endpoints))
|
||||
self.endpoints = endpoints
|
||||
|
||||
|
||||
class HttpError(ClientException):
|
||||
"""The base exception class for all HTTP exceptions.
|
||||
"""
|
||||
http_status = 0
|
||||
message = _("HTTP Error")
|
||||
|
||||
def __init__(self, message=None, details=None,
|
||||
response=None, request_id=None,
|
||||
url=None, method=None, http_status=None):
|
||||
self.http_status = http_status or self.http_status
|
||||
self.message = message or self.message
|
||||
self.details = details
|
||||
self.request_id = request_id
|
||||
self.response = response
|
||||
self.url = url
|
||||
self.method = method
|
||||
formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
|
||||
if request_id:
|
||||
formatted_string += " (Request-ID: %s)" % request_id
|
||||
super(HttpError, self).__init__(formatted_string)
|
||||
|
||||
|
||||
class HTTPRedirection(HttpError):
|
||||
"""HTTP Redirection."""
|
||||
message = _("HTTP Redirection")
|
||||
|
||||
|
||||
class HTTPClientError(HttpError):
|
||||
"""Client-side HTTP error.
|
||||
|
||||
Exception for cases in which the client seems to have erred.
|
||||
"""
|
||||
message = _("HTTP Client Error")
|
||||
|
||||
|
||||
class HttpServerError(HttpError):
|
||||
"""Server-side HTTP error.
|
||||
|
||||
Exception for cases in which the server is aware that it has
|
||||
erred or is incapable of performing the request.
|
||||
"""
|
||||
message = _("HTTP Server Error")
|
||||
|
||||
|
||||
class MultipleChoices(HTTPRedirection):
|
||||
"""HTTP 300 - Multiple Choices.
|
||||
|
||||
Indicates multiple options for the resource that the client may follow.
|
||||
"""
|
||||
|
||||
http_status = 300
|
||||
message = _("Multiple Choices")
|
||||
|
||||
|
||||
class BadRequest(HTTPClientError):
|
||||
"""HTTP 400 - Bad Request.
|
||||
|
||||
The request cannot be fulfilled due to bad syntax.
|
||||
"""
|
||||
http_status = 400
|
||||
message = _("Bad Request")
|
||||
|
||||
|
||||
class Unauthorized(HTTPClientError):
|
||||
"""HTTP 401 - Unauthorized.
|
||||
|
||||
Similar to 403 Forbidden, but specifically for use when authentication
|
||||
is required and has failed or has not yet been provided.
|
||||
"""
|
||||
http_status = 401
|
||||
message = _("Unauthorized")
|
||||
|
||||
|
||||
class PaymentRequired(HTTPClientError):
|
||||
"""HTTP 402 - Payment Required.
|
||||
|
||||
Reserved for future use.
|
||||
"""
|
||||
http_status = 402
|
||||
message = _("Payment Required")
|
||||
|
||||
|
||||
class Forbidden(HTTPClientError):
|
||||
"""HTTP 403 - Forbidden.
|
||||
|
||||
The request was a valid request, but the server is refusing to respond
|
||||
to it.
|
||||
"""
|
||||
http_status = 403
|
||||
message = _("Forbidden")
|
||||
|
||||
|
||||
class NotFound(HTTPClientError):
|
||||
"""HTTP 404 - Not Found.
|
||||
|
||||
The requested resource could not be found but may be available again
|
||||
in the future.
|
||||
"""
|
||||
http_status = 404
|
||||
message = _("Not Found")
|
||||
|
||||
|
||||
class MethodNotAllowed(HTTPClientError):
|
||||
"""HTTP 405 - Method Not Allowed.
|
||||
|
||||
A request was made of a resource using a request method not supported
|
||||
by that resource.
|
||||
"""
|
||||
http_status = 405
|
||||
message = _("Method Not Allowed")
|
||||
|
||||
|
||||
class NotAcceptable(HTTPClientError):
|
||||
"""HTTP 406 - Not Acceptable.
|
||||
|
||||
The requested resource is only capable of generating content not
|
||||
acceptable according to the Accept headers sent in the request.
|
||||
"""
|
||||
http_status = 406
|
||||
message = _("Not Acceptable")
|
||||
|
||||
|
||||
class ProxyAuthenticationRequired(HTTPClientError):
|
||||
"""HTTP 407 - Proxy Authentication Required.
|
||||
|
||||
The client must first authenticate itself with the proxy.
|
||||
"""
|
||||
http_status = 407
|
||||
message = _("Proxy Authentication Required")
|
||||
|
||||
|
||||
class RequestTimeout(HTTPClientError):
|
||||
"""HTTP 408 - Request Timeout.
|
||||
|
||||
The server timed out waiting for the request.
|
||||
"""
|
||||
http_status = 408
|
||||
message = _("Request Timeout")
|
||||
|
||||
|
||||
class Conflict(HTTPClientError):
|
||||
"""HTTP 409 - Conflict.
|
||||
|
||||
Indicates that the request could not be processed because of conflict
|
||||
in the request, such as an edit conflict.
|
||||
"""
|
||||
http_status = 409
|
||||
message = _("Conflict")
|
||||
|
||||
|
||||
class Gone(HTTPClientError):
|
||||
"""HTTP 410 - Gone.
|
||||
|
||||
Indicates that the resource requested is no longer available and will
|
||||
not be available again.
|
||||
"""
|
||||
http_status = 410
|
||||
message = _("Gone")
|
||||
|
||||
|
||||
class LengthRequired(HTTPClientError):
|
||||
"""HTTP 411 - Length Required.
|
||||
|
||||
The request did not specify the length of its content, which is
|
||||
required by the requested resource.
|
||||
"""
|
||||
http_status = 411
|
||||
message = _("Length Required")
|
||||
|
||||
|
||||
class PreconditionFailed(HTTPClientError):
|
||||
"""HTTP 412 - Precondition Failed.
|
||||
|
||||
The server does not meet one of the preconditions that the requester
|
||||
put on the request.
|
||||
"""
|
||||
http_status = 412
|
||||
message = _("Precondition Failed")
|
||||
|
||||
|
||||
class RequestEntityTooLarge(HTTPClientError):
|
||||
"""HTTP 413 - Request Entity Too Large.
|
||||
|
||||
The request is larger than the server is willing or able to process.
|
||||
"""
|
||||
http_status = 413
|
||||
message = _("Request Entity Too Large")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
try:
|
||||
self.retry_after = int(kwargs.pop('retry_after'))
|
||||
except (KeyError, ValueError):
|
||||
self.retry_after = 0
|
||||
|
||||
super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class RequestUriTooLong(HTTPClientError):
|
||||
"""HTTP 414 - Request-URI Too Long.
|
||||
|
||||
The URI provided was too long for the server to process.
|
||||
"""
|
||||
http_status = 414
|
||||
message = _("Request-URI Too Long")
|
||||
|
||||
|
||||
class UnsupportedMediaType(HTTPClientError):
|
||||
"""HTTP 415 - Unsupported Media Type.
|
||||
|
||||
The request entity has a media type which the server or resource does
|
||||
not support.
|
||||
"""
|
||||
http_status = 415
|
||||
message = _("Unsupported Media Type")
|
||||
|
||||
|
||||
class RequestedRangeNotSatisfiable(HTTPClientError):
|
||||
"""HTTP 416 - Requested Range Not Satisfiable.
|
||||
|
||||
The client has asked for a portion of the file, but the server cannot
|
||||
supply that portion.
|
||||
"""
|
||||
http_status = 416
|
||||
message = _("Requested Range Not Satisfiable")
|
||||
|
||||
|
||||
class ExpectationFailed(HTTPClientError):
|
||||
"""HTTP 417 - Expectation Failed.
|
||||
|
||||
The server cannot meet the requirements of the Expect request-header field.
|
||||
"""
|
||||
http_status = 417
|
||||
message = _("Expectation Failed")
|
||||
|
||||
|
||||
class UnprocessableEntity(HTTPClientError):
|
||||
"""HTTP 422 - Unprocessable Entity.
|
||||
|
||||
The request was well-formed but was unable to be followed due to semantic
|
||||
errors.
|
||||
"""
|
||||
http_status = 422
|
||||
message = _("Unprocessable Entity")
|
||||
|
||||
|
||||
class InternalServerError(HttpServerError):
|
||||
"""HTTP 500 - Internal Server Error.
|
||||
|
||||
A generic error message, given when no more specific message is suitable.
|
||||
"""
|
||||
http_status = 500
|
||||
message = _("Internal Server Error")
|
||||
|
||||
|
||||
# NotImplemented is a python keyword.
|
||||
class HttpNotImplemented(HttpServerError):
|
||||
"""HTTP 501 - Not Implemented.
|
||||
|
||||
The server either does not recognize the request method, or it lacks
|
||||
the ability to fulfill the request.
|
||||
"""
|
||||
http_status = 501
|
||||
message = _("Not Implemented")
|
||||
|
||||
|
||||
class BadGateway(HttpServerError):
|
||||
"""HTTP 502 - Bad Gateway.
|
||||
|
||||
The server was acting as a gateway or proxy and received an invalid
|
||||
response from the upstream server.
|
||||
"""
|
||||
http_status = 502
|
||||
message = _("Bad Gateway")
|
||||
|
||||
|
||||
class ServiceUnavailable(HttpServerError):
|
||||
"""HTTP 503 - Service Unavailable.
|
||||
|
||||
The server is currently unavailable.
|
||||
"""
|
||||
http_status = 503
|
||||
message = _("Service Unavailable")
|
||||
|
||||
|
||||
class GatewayTimeout(HttpServerError):
|
||||
"""HTTP 504 - Gateway Timeout.
|
||||
|
||||
The server was acting as a gateway or proxy and did not receive a timely
|
||||
response from the upstream server.
|
||||
"""
|
||||
http_status = 504
|
||||
message = _("Gateway Timeout")
|
||||
|
||||
|
||||
class HttpVersionNotSupported(HttpServerError):
|
||||
"""HTTP 505 - HttpVersion Not Supported.
|
||||
|
||||
The server does not support the HTTP protocol version used in the request.
|
||||
"""
|
||||
http_status = 505
|
||||
message = _("HTTP Version Not Supported")
|
||||
|
||||
|
||||
# _code_map contains all the classes that have http_status attribute.
|
||||
_code_map = dict(
|
||||
(getattr(obj, 'http_status', None), obj)
|
||||
for name, obj in six.iteritems(vars(sys.modules[__name__]))
|
||||
if inspect.isclass(obj) and getattr(obj, 'http_status', False)
|
||||
)
|
||||
|
||||
|
||||
def from_response(response, method, url):
|
||||
"""Returns an instance of :class:`HttpError` or subclass based on response.
|
||||
|
||||
:param response: instance of `requests.Response` class
|
||||
:param method: HTTP method used for request
|
||||
:param url: URL used for request
|
||||
"""
|
||||
|
||||
req_id = response.headers.get("x-openstack-request-id")
|
||||
# NOTE(hdd) true for older versions of nova and cinder
|
||||
if not req_id:
|
||||
req_id = response.headers.get("x-compute-request-id")
|
||||
kwargs = {
|
||||
"http_status": response.status_code,
|
||||
"response": response,
|
||||
"method": method,
|
||||
"url": url,
|
||||
"request_id": req_id,
|
||||
}
|
||||
if "retry-after" in response.headers:
|
||||
kwargs["retry_after"] = response.headers["retry-after"]
|
||||
|
||||
content_type = response.headers.get("Content-Type", "")
|
||||
if content_type.startswith("application/json"):
|
||||
try:
|
||||
body = response.json()
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
if isinstance(body, dict) and isinstance(body.get("error"), dict):
|
||||
error = body["error"]
|
||||
kwargs["message"] = error.get("message")
|
||||
kwargs["details"] = error.get("details")
|
||||
elif content_type.startswith("text/"):
|
||||
kwargs["details"] = response.text
|
||||
|
||||
try:
|
||||
cls = _code_map[response.status_code]
|
||||
except KeyError:
|
||||
if 500 <= response.status_code < 600:
|
||||
cls = HttpServerError
|
||||
elif 400 <= response.status_code < 500:
|
||||
cls = HTTPClientError
|
||||
else:
|
||||
cls = HttpError
|
||||
return cls(**kwargs)
|
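So a 404 with a JSON error body comes back as a NotFound carrying the server's message. A quick sketch using the names from the module above, with a requests.Response built by hand rather than returned by a real server:

import json
import requests

resp = requests.Response()
resp.status_code = 404
resp.headers["Content-Type"] = "application/json"
resp.headers["x-openstack-request-id"] = "req-1234"
resp._content = json.dumps(
    {"error": {"message": "Instance not found", "details": "no such id"}}).encode("utf-8")

err = from_response(resp, "GET", "http://rack/v1/groups/foo")
assert isinstance(err, NotFound)
print(err)   # "Instance not found (HTTP 404) (Request-ID: req-1234)"
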
@ -1,173 +0,0 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
A fake server that "responds" to API methods with pre-canned responses.
|
||||
|
||||
All of these responses come from the spec, so if for some reason the spec's
|
||||
wrong the tests might raise AssertionError. I've indicated in comments the
|
||||
places where actual behavior differs from the spec.
|
||||
"""
|
||||
|
||||
# W0102: Dangerous default value %s as argument
|
||||
# pylint: disable=W0102
|
||||
|
||||
import json
|
||||
|
||||
import requests
|
||||
import six
|
||||
from six.moves.urllib import parse
|
||||
|
||||
from rackclient.openstack.common.apiclient import client
|
||||
|
||||
|
||||
def assert_has_keys(dct, required=[], optional=[]):
|
||||
for k in required:
|
||||
try:
|
||||
assert k in dct
|
||||
except AssertionError:
|
||||
extra_keys = set(dct.keys()).difference(set(required + optional))
|
||||
raise AssertionError("found unexpected keys: %s" %
|
||||
list(extra_keys))
|
||||
|
||||
|
||||
class TestResponse(requests.Response):
|
||||
"""Wrap requests.Response and provide a convenient initialization.
|
||||
"""
|
||||
|
||||
def __init__(self, data):
|
||||
super(TestResponse, self).__init__()
|
||||
self._content_consumed = True
|
||||
if isinstance(data, dict):
|
||||
self.status_code = data.get('status_code', 200)
|
||||
# Fake the text attribute to streamline Response creation
|
||||
text = data.get('text', "")
|
||||
if isinstance(text, (dict, list)):
|
||||
self._content = json.dumps(text)
|
||||
default_headers = {
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
else:
|
||||
self._content = text
|
||||
default_headers = {}
|
||||
if six.PY3 and isinstance(self._content, six.string_types):
|
||||
self._content = self._content.encode('utf-8', 'strict')
|
||||
self.headers = data.get('headers') or default_headers
|
||||
else:
|
||||
self.status_code = data
|
||||
|
||||
def __eq__(self, other):
|
||||
return (self.status_code == other.status_code and
|
||||
self.headers == other.headers and
|
||||
self._content == other._content)
|
||||
|
||||
|
||||
class FakeHTTPClient(client.HTTPClient):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.callstack = []
|
||||
self.fixtures = kwargs.pop("fixtures", None) or {}
|
||||
if not args and "auth_plugin" not in kwargs:
|
||||
args = (None, )
|
||||
super(FakeHTTPClient, self).__init__(*args, **kwargs)
|
||||
|
||||
def assert_called(self, method, url, body=None, pos=-1):
|
||||
"""Assert than an API method was just called.
|
||||
"""
|
||||
expected = (method, url)
|
||||
called = self.callstack[pos][0:2]
|
||||
assert self.callstack, \
|
||||
"Expected %s %s but no calls were made." % expected
|
||||
|
||||
assert expected == called, 'Expected %s %s; got %s %s' % \
|
||||
(expected + called)
|
||||
|
||||
if body is not None:
|
||||
if self.callstack[pos][3] != body:
|
||||
raise AssertionError('%r != %r' %
|
||||
(self.callstack[pos][3], body))
|
||||
|
||||
def assert_called_anytime(self, method, url, body=None):
|
||||
"""Assert than an API method was called anytime in the test.
|
||||
"""
|
||||
expected = (method, url)
|
||||
|
||||
assert self.callstack, \
|
||||
"Expected %s %s but no calls were made." % expected
|
||||
|
||||
found = False
|
||||
entry = None
|
||||
for entry in self.callstack:
|
||||
if expected == entry[0:2]:
|
||||
found = True
|
||||
break
|
||||
|
||||
assert found, 'Expected %s %s; got %s' % \
|
||||
(method, url, self.callstack)
|
||||
if body is not None:
|
||||
assert entry[3] == body, "%s != %s" % (entry[3], body)
|
||||
|
||||
self.callstack = []
|
||||
|
||||
def clear_callstack(self):
|
||||
self.callstack = []
|
||||
|
||||
def authenticate(self):
|
||||
pass
|
||||
|
||||
def client_request(self, client, method, url, **kwargs):
|
||||
# Check that certain things are called correctly
|
||||
if method in ["GET", "DELETE"]:
|
||||
assert "json" not in kwargs
|
||||
|
||||
# Note the call
|
||||
self.callstack.append(
|
||||
(method,
|
||||
url,
|
||||
kwargs.get("headers") or {},
|
||||
kwargs.get("json") or kwargs.get("data")))
|
||||
try:
|
||||
fixture = self.fixtures[url][method]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
return TestResponse({"headers": fixture[0],
|
||||
"text": fixture[1]})
|
||||
|
||||
# Call the method
|
||||
args = parse.parse_qsl(parse.urlparse(url)[4])
|
||||
kwargs.update(args)
|
||||
munged_url = url.rsplit('?', 1)[0]
|
||||
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
|
||||
munged_url = munged_url.replace('-', '_')
|
||||
|
||||
callback = "%s_%s" % (method.lower(), munged_url)
|
||||
|
||||
if not hasattr(self, callback):
|
||||
raise AssertionError('Called unknown API method: %s %s, '
|
||||
'expected fakes method name: %s' %
|
||||
(method, url, callback))
|
||||
|
||||
resp = getattr(self, callback)(**kwargs)
|
||||
if len(resp) == 3:
|
||||
status, headers, body = resp
|
||||
else:
|
||||
status, body = resp
|
||||
headers = {}
|
||||
return TestResponse({
|
||||
"status_code": status,
|
||||
"text": body,
|
||||
"headers": headers,
|
||||
})
|
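In practice a test hands FakeHTTPClient a fixtures dict keyed by URL and method and then asserts on the recorded call stack. A small hedged sketch using the class above (the URL and canned body are made-up example data):

fixtures = {
    "/groups": {
        "GET": ({}, {"groups": [{"gid": "11111111", "name": "group1"}]}),
    },
}
fake = FakeHTTPClient(fixtures=fixtures)

resp = fake.client_request(None, "GET", "/groups")
print(resp.status_code)        # 200
print(resp.json()["groups"])   # the canned body above

fake.assert_called("GET", "/groups")
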
@ -1,317 +0,0 @@
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# W0603: Using the global statement
|
||||
# W0621: Redefining name %s from outer scope
|
||||
# pylint: disable=W0603,W0621
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import getpass
|
||||
import inspect
|
||||
import os
|
||||
import sys
|
||||
import textwrap
|
||||
|
||||
import prettytable
|
||||
import six
|
||||
from six import moves
|
||||
|
||||
from rackclient.openstack.common.apiclient import exceptions
|
||||
from rackclient.openstack.common.gettextutils import _
|
||||
from rackclient.openstack.common import strutils
|
||||
from rackclient.openstack.common import uuidutils
|
||||
|
||||
|
||||
def validate_args(fn, *args, **kwargs):
|
||||
"""Check that the supplied args are sufficient for calling a function.
|
||||
|
||||
>>> validate_args(lambda a: None)
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
MissingArgs: Missing argument(s): a
|
||||
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
MissingArgs: Missing argument(s): b, d
|
||||
|
||||
:param fn: the function to check
|
||||
:param arg: the positional arguments supplied
|
||||
:param kwargs: the keyword arguments supplied
|
||||
"""
|
||||
argspec = inspect.getargspec(fn)
|
||||
|
||||
num_defaults = len(argspec.defaults or [])
|
||||
required_args = argspec.args[:len(argspec.args) - num_defaults]
|
||||
|
||||
def isbound(method):
|
||||
return getattr(method, '__self__', None) is not None
|
||||
|
||||
if isbound(fn):
|
||||
required_args.pop(0)
|
||||
|
||||
missing = [arg for arg in required_args if arg not in kwargs]
|
||||
missing = missing[len(args):]
|
||||
if missing:
|
||||
raise exceptions.MissingArgs(missing)
|
||||
|
||||
|
||||
def arg(*args, **kwargs):
|
||||
"""Decorator for CLI args.
|
||||
|
||||
Example:
|
||||
|
||||
>>> @arg("name", help="Name of the new entity")
|
||||
... def entity_create(args):
|
||||
... pass
|
||||
"""
|
||||
def _decorator(func):
|
||||
add_arg(func, *args, **kwargs)
|
||||
return func
|
||||
return _decorator
|
||||
|
||||
|
||||
def env(*args, **kwargs):
|
||||
"""Returns the first environment variable set.
|
||||
|
||||
If all are empty, defaults to '' or keyword arg `default`.
|
||||
"""
|
||||
for arg in args:
|
||||
value = os.environ.get(arg)
|
||||
if value:
|
||||
return value
|
||||
return kwargs.get('default', '')
|
||||
|
||||
|
||||
def add_arg(func, *args, **kwargs):
|
||||
"""Bind CLI arguments to a shell.py `do_foo` function."""
|
||||
|
||||
if not hasattr(func, 'arguments'):
|
||||
func.arguments = []
|
||||
|
||||
# NOTE(sirp): avoid dups that can occur when the module is shared across
|
||||
# tests.
|
||||
if (args, kwargs) not in func.arguments:
|
||||
# Because of the semantics of decorator composition if we just append
|
||||
# to the options list positional options will appear to be backwards.
|
||||
func.arguments.insert(0, (args, kwargs))
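# NOTE(editor): illustrative usage sketch, not part of the original file.
# Stacked @arg decorators end up in top-to-bottom source order because
# add_arg() prepends to func.arguments; a shell would feed each entry to
# argparse's add_argument(). The _demo_create command is hypothetical.
@arg('--size', metavar='<GB>', help='Size of the new entity')
@arg('name', help='Name of the new entity')
def _demo_create(args):
    pass

assert _demo_create.arguments == [
    (('--size',), {'metavar': '<GB>', 'help': 'Size of the new entity'}),
    (('name',), {'help': 'Name of the new entity'}),
]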
def unauthenticated(func):
    """Adds 'unauthenticated' attribute to decorated function.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    func.unauthenticated = True
    return func


def isunauthenticated(func):
    """Checks if the function does not require authentication.

    Mark such functions with the `@unauthenticated` decorator.

    :returns: bool
    """
    return getattr(func, 'unauthenticated', False)


def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None):
    """Print a list of objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting
    :param sortby_index: index of the field for sorting table rows
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': fields[sortby_index]}
    pt = prettytable.PrettyTable(fields, caching=False)
    pt.align = 'l'

    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)

    print(strutils.safe_encode(pt.get_string(**kwargs)))
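# NOTE(editor): illustrative usage sketch, not part of the original file.
# Column titles are lowercased and underscore-joined to look up attributes,
# so any object with matching attribute names can be printed; the namedtuple
# below is a hypothetical stand-in for a Resource.
import collections
_DemoRow = collections.namedtuple('_DemoRow', ['name', 'status'])
print_list([_DemoRow('proc-1', 'ACTIVE'), _DemoRow('proc-2', 'BUILD')],
           ['Name', 'Status'])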
def print_dict(dct, dict_property="Property", wrap=0):
    """Print a `dict` as a table of two columns.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column
    """
    pt = prettytable.PrettyTable([dict_property, 'Value'], caching=False)
    pt.align = 'l'
    for k, v in six.iteritems(dct):
        # convert dict to str to check length
        if isinstance(v, dict):
            v = six.text_type(v)
        if wrap > 0:
            v = textwrap.fill(six.text_type(v), wrap)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        if v and isinstance(v, six.string_types) and r'\n' in v:
            lines = v.strip().split(r'\n')
            col1 = k
            for line in lines:
                pt.add_row([col1, line])
                col1 = ''
        else:
            pt.add_row([k, v])
    print(strutils.safe_encode(pt.get_string()))


def get_password(max_password_prompts=3):
    """Read password from TTY."""
    verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    pw = None
    if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
        # Check for Ctrl-D
        try:
            for __ in moves.range(max_password_prompts):
                pw1 = getpass.getpass("OS Password: ")
                if verify:
                    pw2 = getpass.getpass("Please verify: ")
                else:
                    pw2 = pw1
                if pw1 == pw2 and pw1:
                    pw = pw1
                    break
        except EOFError:
            pass
    return pw


def find_resource(manager, name_or_id, **find_args):
    """Look for resource in a given manager.

    Used as a helper for the _find_* methods.
    Example:

    .. code-block:: python

        def _find_hypervisor(cs, hypervisor):
            # Get a hypervisor by name or ID.
            return cliutils.find_resource(cs.hypervisors, hypervisor)
    """
    # first try to get entity as integer id
    try:
        return manager.get(int(name_or_id))
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # now try to get entity as uuid
    try:
        if six.PY2:
            tmp_id = strutils.safe_encode(name_or_id)
        else:
            tmp_id = strutils.safe_decode(name_or_id)

        if uuidutils.is_uuid_like(tmp_id):
            return manager.get(tmp_id)
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # for str id which is not uuid
    if getattr(manager, 'is_alphanum_id_allowed', False):
        try:
            return manager.get(name_or_id)
        except exceptions.NotFound:
            pass

    try:
        try:
            return manager.find(human_id=name_or_id, **find_args)
        except exceptions.NotFound:
            pass

        # finally try to find entity by name
        try:
            resource = getattr(manager, 'resource_class', None)
            name_attr = resource.NAME_ATTR if resource else 'name'
            kwargs = {name_attr: name_or_id}
            kwargs.update(find_args)
            return manager.find(**kwargs)
        except exceptions.NotFound:
            msg = _("No %(name)s with a name or "
                    "ID of '%(name_or_id)s' exists.") % \
                {
                    "name": manager.resource_class.__name__.lower(),
                    "name_or_id": name_or_id
                }
            raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = _("Multiple %(name)s matches found for "
                "'%(name_or_id)s', use an ID to be more specific.") % \
            {
                "name": manager.resource_class.__name__.lower(),
                "name_or_id": name_or_id
            }
        raise exceptions.CommandError(msg)
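# NOTE(editor): illustrative usage sketch, not part of the original file; the
# fake manager below is a hypothetical stand-in. find_resource() tries an
# integer id, then a UUID, and only then falls back to manager.find().
class _DemoManager(object):
    def get(self, obj_id):
        raise exceptions.NotFound('not found')

    def find(self, **kwargs):
        return kwargs

# 'web-1' is neither an integer nor a UUID, so the lookup ends up in find().
assert find_resource(_DemoManager(), 'web-1') == {'human_id': 'web-1'}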
def service_type(stype):
    """Adds 'service_type' attribute to decorated function.

    Usage:

    .. code-block:: python

       @service_type('volume')
       def mymethod(f):
           ...
    """
    def inner(f):
        f.service_type = stype
        return f
    return inner


def get_service_type(f):
    """Retrieves service type from function."""
    return getattr(f, 'service_type', None)


def pretty_choice_list(l):
    return ', '.join("'%s'" % i for i in l)


def exit(msg=''):
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(1)
@ -1,479 +0,0 @@
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
gettext for openstack-common modules.

Usual usage in an openstack.common module:

    from rackclient.openstack.common.gettextutils import _
"""

import copy
import gettext
import locale
from logging import handlers
import os

from babel import localedata
import six

_AVAILABLE_LANGUAGES = {}

# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False


class TranslatorFactory(object):
    """Create translator functions
    """

    def __init__(self, domain, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        if localedir is None:
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.

        """
        if domain is None:
            domain = self.domain
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext

        def f(msg):
            """oslo.i18n.gettextutils translation function."""
            if USE_LAZY:
                return Message(msg, domain=domain)
            return m(msg)
        return f

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')


# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions.
_translators = TranslatorFactory('rackclient')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

# NOTE(dhellmann): End of globals that will move to the application's
# integration module.


def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True


def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    Note that to enable lazy translation, enable_lazy must be
    called.

    :param domain: the translation domain
    """
    from six import moves
    tf = TranslatorFactory(domain)
    moves.builtins.__dict__['_'] = tf.primary
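# NOTE(editor): illustrative usage sketch, not part of the original file.
# With no message catalogs on disk, gettext falls back to echoing the msgid,
# so the factory can be exercised without translation data; enable_lazy()
# would make the same call return a Message object instead of a plain string.
_demo_t = TranslatorFactory('rackclient').primary
assert _demo_t('Hello') == 'Hello'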
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='rackclient', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """

        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
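# NOTE(editor): illustrative usage sketch, not part of the original file. A
# Message stays a regular unicode string for formatting and comparison, but
# keeps its msgid and parameters so translate() can re-render it later.
_demo_msg = Message('Process %(name)s spawned') % {'name': 'proc-1'}
assert isinstance(_demo_msg, six.text_type)
assert _demo_msg == 'Process proc-1 spawned'
assert _demo_msg.msgid == 'Process %(name)s spawned'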
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale_, alias) in six.iteritems(aliases):
        if locale_ in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)


def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    message = obj
    if not isinstance(message, Message):
        # If the object to translate is not already translatable,
        # let's first get its unicode representation
        message = six.text_type(obj)
    if isinstance(message, Message):
        # Even after unicoding() we still need to check if we are
        # running with translatable unicode before translating
        return message.translate(desired_locale)
    return obj


def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        translated_dict = {}
        for (k, v) in six.iteritems(args):
            translated_v = translate(v, desired_locale)
            translated_dict[k] = translated_v
        return translated_dict
    return translate(args, desired_locale)


class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        self.target.setFormatter(fmt)

    def emit(self, record):
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args

        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = original_msg
            record.args = original_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)

        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)

        self.target.emit(record)
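# NOTE(editor): illustrative usage sketch, not part of the original file.
# translate() accepts both lazy Message objects and plain values; without
# catalogs on disk it simply re-renders the original text.
_demo_lazy = Message('volume %s attached') % 'vol-1'
assert translate(_demo_lazy) == 'volume vol-1 attached'
assert translate(12345) == 12345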
@ -1,73 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Import related utilities and helper functions.
"""

import sys
import traceback


def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    try:
        return getattr(sys.modules[mod_str], class_str)
    except AttributeError:
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))


def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    return import_class(import_str)(*args, **kwargs)


def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then falling back to
    a full path if not found in the default namespace.
    """
    import_value = "%s.%s" % (name_space, import_str)
    try:
        return import_class(import_value)(*args, **kwargs)
    except ImportError:
        return import_class(import_str)(*args, **kwargs)


def import_module(import_str):
    """Import a module."""
    __import__(import_str)
    return sys.modules[import_str]


def import_versioned_module(version, submodule=None):
    module = 'rackclient.v%s' % version
    if submodule:
        module = '.'.join((module, submodule))
    return import_module(module)


def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        return import_module(import_str)
    except ImportError:
        return default
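# NOTE(editor): illustrative usage sketch, not part of the original file; it
# sticks to stdlib names so the helpers can be exercised directly.
assert import_class('collections.OrderedDict').__name__ == 'OrderedDict'
assert import_module('json').loads('{"a": 1}') == {'a': 1}
assert isinstance(import_object('collections.OrderedDict'), dict)
assert try_import('no_such_module', default='missing') == 'missing'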
@ -1,196 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
JSON related utilities.

This module provides a few things:

    1) A handy function for getting an object down to something that can be
    JSON serialized. See to_primitive().

    2) Wrappers around loads() and dumps(). The dumps() wrapper will
    automatically use to_primitive() for you if needed.

    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
    is available.
'''


import codecs
import datetime
import functools
import inspect
import itertools
import sys

is_simplejson = False
if sys.version_info < (2, 7):
    # On Python <= 2.6, json module is not C boosted, so try to use
    # simplejson module if available
    try:
        import simplejson as json
        is_simplejson = True
    except ImportError:
        import json
else:
    import json

import six
import six.moves.xmlrpc_client as xmlrpclib

from rackclient.openstack.common import gettextutils
from rackclient.openstack.common import importutils
from rackclient.openstack.common import strutils
from rackclient.openstack.common import timeutils

netaddr = importutils.try_import("netaddr")

_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    # 51817 <type 'bool'>
    # 26164 <type 'list'>
    # 6491 <type 'float'>
    # 283 <type 'tuple'>
    # 19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)


def dumps(value, default=to_primitive, **kwargs):
    if is_simplejson:
        kwargs['namedtuple_as_object'] = False
    return json.dumps(value, default=default, **kwargs)


def dump(obj, fp, *args, **kwargs):
    if is_simplejson:
        kwargs['namedtuple_as_object'] = False
    return json.dump(obj, fp, *args, **kwargs)


def loads(s, encoding='utf-8', **kwargs):
    return json.loads(strutils.safe_decode(s, encoding), **kwargs)


def load(fp, encoding='utf-8', **kwargs):
    return json.load(codecs.getreader(encoding)(fp), **kwargs)


try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
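# NOTE(editor): illustrative usage sketch, not part of the original file.
# dumps() leans on to_primitive() for objects the stdlib encoder rejects,
# such as datetimes, which are rendered with timeutils.strtime().
assert loads(dumps({'when': datetime.datetime(2015, 1, 2, 3, 4, 5)})) == \
    {'when': '2015-01-02T03:04:05.000000'}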
@ -1,163 +0,0 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Network-related utilities and helper functions.
"""

import logging
import socket

from six.moves.urllib import parse

from rackclient.openstack.common.gettextutils import _LW

LOG = logging.getLogger(__name__)


def parse_host_port(address, default_port=None):
    """Interpret a string as a host:port pair.

    An IPv6 address MUST be escaped if accompanied by a port,
    because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
    means both [2001:db8:85a3::8a2e:370:7334] and
    [2001:db8:85a3::8a2e:370]:7334.

    >>> parse_host_port('server01:80')
    ('server01', 80)
    >>> parse_host_port('server01')
    ('server01', None)
    >>> parse_host_port('server01', default_port=1234)
    ('server01', 1234)
    >>> parse_host_port('[::1]:80')
    ('::1', 80)
    >>> parse_host_port('[::1]')
    ('::1', None)
    >>> parse_host_port('[::1]', default_port=1234)
    ('::1', 1234)
    >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234)
    ('2001:db8:85a3::8a2e:370:7334', 1234)
    >>> parse_host_port(None)
    (None, None)
    """
    if not address:
        return (None, None)

    if address[0] == '[':
        # Escaped ipv6
        _host, _port = address[1:].split(']')
        host = _host
        if ':' in _port:
            port = _port.split(':')[1]
        else:
            port = default_port
    else:
        if address.count(':') == 1:
            host, port = address.split(':')
        else:
            # 0 means ipv4, >1 means ipv6.
            # We prohibit unescaped ipv6 addresses with port.
            host = address
            port = default_port

    return (host, None if port is None else int(port))


class ModifiedSplitResult(parse.SplitResult):
    """Split results class for urlsplit."""

    # NOTE(dims): The functions below are needed for Python 2.6.x.
    # We can remove these when we drop support for 2.6.x.
    @property
    def hostname(self):
        netloc = self.netloc.split('@', 1)[-1]
        host, port = parse_host_port(netloc)
        return host

    @property
    def port(self):
        netloc = self.netloc.split('@', 1)[-1]
        host, port = parse_host_port(netloc)
        return port


def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL using urlparse.urlsplit(), splitting query and fragments.
    This function papers over Python issue9374 when needed.

    The parameters are the same as urlparse.urlsplit.
    """
    scheme, netloc, path, query, fragment = parse.urlsplit(
        url, scheme, allow_fragments)
    if allow_fragments and '#' in path:
        path, fragment = path.split('#', 1)
    if '?' in path:
        path, query = path.split('?', 1)
    return ModifiedSplitResult(scheme, netloc,
                               path, query, fragment)


def set_tcp_keepalive(sock, tcp_keepalive=True,
                      tcp_keepidle=None,
                      tcp_keepalive_interval=None,
                      tcp_keepalive_count=None):
    """Set values for tcp keepalive parameters

    This function configures tcp keepalive parameters if users wish to do
    so.

    :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
      not sure, this should be True, and default values will be used.

    :param tcp_keepidle: time to wait before starting to send keepalive probes
    :param tcp_keepalive_interval: time between successive probes, once the
      initial wait time is over
    :param tcp_keepalive_count: number of probes to send before the connection
      is killed
    """

    # NOTE(praneshp): Despite keepalive being a tcp concept, the level is
    # still SOL_SOCKET. This is a quirk.
    if isinstance(tcp_keepalive, bool):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive)
    else:
        raise TypeError("tcp_keepalive must be a boolean")

    if not tcp_keepalive:
        return

    # These options aren't available in the OS X version of eventlet,
    # Idle + Count * Interval effectively gives you the total timeout.
    if tcp_keepidle is not None:
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPIDLE,
                            tcp_keepidle)
        else:
            LOG.warning(_LW('tcp_keepidle not available on your system'))
    if tcp_keepalive_interval is not None:
        if hasattr(socket, 'TCP_KEEPINTVL'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPINTVL,
                            tcp_keepalive_interval)
        else:
            LOG.warning(_LW('tcp_keepintvl not available on your system'))
    if tcp_keepalive_count is not None:
        if hasattr(socket, 'TCP_KEEPCNT'):
            sock.setsockopt(socket.IPPROTO_TCP,
                            socket.TCP_KEEPCNT,
                            tcp_keepalive_count)
        else:
            LOG.warning(_LW('tcp_keepcnt not available on your system'))
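# NOTE(editor): illustrative usage sketch, not part of the original file. The
# helper only needs a TCP socket object; knobs that the platform does not
# expose degrade to a log warning rather than an error.
_demo_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_tcp_keepalive(_demo_sock, tcp_keepalive=True,
                  tcp_keepidle=60,
                  tcp_keepalive_interval=10,
                  tcp_keepalive_count=5)
_demo_sock.close()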
@ -1,311 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import math
import re
import sys
import unicodedata

import six

from rackclient.openstack.common.gettextutils import _


UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")


# NOTE(flaper87): The following globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS_2 = []
_SANITIZE_PATTERNS_1 = []

# NOTE(amrith): Some regular expressions have only one parameter, some
# have two parameters. Use different lists of patterns here.
_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+']
_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                      r'(%(key)s\s+[\"\']).*?([\"\'])',
                      r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)',
                      r'(<%(key)s>).*?(</%(key)s>)',
                      r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                      r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
                      r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?'
                      '[\'"]).*?([\'"])',
                      r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']

for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS_2:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS_2.append(reg_ex)

    for pattern in _FORMAT_PATTERNS_1:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS_1.append(reg_ex)


def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    return bool_from_string(subject) and 1 or 0


def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default


def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)


def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    else:
        return text


def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into a float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        return int(math.ceil(res))
    return res
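# NOTE(editor): illustrative usage sketch, not part of the original file. IEC
# prefixes are powers of 1024, SI prefixes powers of 1000, and bit suffixes
# divide the magnitude by 8.
assert string_to_bytes('1KiB') == 1024.0
assert string_to_bytes('1kB', unit_system='SI') == 1000.0
assert string_to_bytes('8Kib') == 1024.0
assert string_to_bytes('1.5MB', return_int=True) == 1572864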
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)


def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # NOTE(ldbragst): Check to see if anything in message contains any key
    # specified in _SANITIZE_KEYS, if not then just return the message since
    # we don't have to mask any passwords.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message

    substitute = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS_2:
        message = re.sub(pattern, substitute, message)

    substitute = r'\g<1>' + secret
    for pattern in _SANITIZE_PATTERNS_1:
        message = re.sub(pattern, substitute, message)

    return message
@ -1,210 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Time related utilities and helper functions.
"""

import calendar
import datetime
import time

import iso8601
import six


# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    st += ('Z' if tz == 'UTC' else tz)
    return st


def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
        raise ValueError(six.text_type(e))
    except TypeError as e:
        raise ValueError(six.text_type(e))


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)


def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    return datetime.datetime.strptime(timestr, fmt)


def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
    else:
        before = before.replace(tzinfo=None)

    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
    else:
        after = after.replace(tzinfo=None)

    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is None:
        # NOTE(kgriffs): This is several times faster
        # than going through calendar.timegm(...)
        return int(time.time())

    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow."""
    if utcnow.override_time:
        try:
            return utcnow.override_time.pop(0)
        except AttributeError:
            return utcnow.override_time
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns an iso8601 formatted date from timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    :param override_time: datetime instance or list thereof. If not
                          given, defaults to the current UTC time.
    """
    utcnow.override_time = override_time or datetime.datetime.utcnow()


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        for dt in utcnow.override_time:
            dt += timedelta
    except TypeError:
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))


def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None
|
||||
|
||||
|
||||
def marshall_now(now=None):
|
||||
"""Make an rpc-safe datetime with microseconds.
|
||||
|
||||
Note: tzinfo is stripped, but not required for relative times.
|
||||
"""
|
||||
if not now:
|
||||
now = utcnow()
|
||||
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
|
||||
minute=now.minute, second=now.second,
|
||||
microsecond=now.microsecond)
|
||||
|
||||
|
||||
def unmarshall_time(tyme):
|
||||
"""Unmarshall a datetime dict."""
|
||||
return datetime.datetime(day=tyme['day'],
|
||||
month=tyme['month'],
|
||||
year=tyme['year'],
|
||||
hour=tyme['hour'],
|
||||
minute=tyme['minute'],
|
||||
second=tyme['second'],
|
||||
microsecond=tyme['microsecond'])
|
||||
|
||||
|
||||
def delta_seconds(before, after):
|
||||
"""Return the difference between two timing objects.
|
||||
|
||||
Compute the difference in seconds between two date, time, or
|
||||
datetime objects (as a float, to microsecond resolution).
|
||||
"""
|
||||
delta = after - before
|
||||
return total_seconds(delta)
|
||||
|
||||
|
||||
def total_seconds(delta):
|
||||
"""Return the total seconds of datetime.timedelta object.
|
||||
|
||||
Compute total seconds of datetime.timedelta, datetime.timedelta
|
||||
doesn't have method total_seconds in Python2.6, calculate it manually.
|
||||
"""
|
||||
try:
|
||||
return delta.total_seconds()
|
||||
except AttributeError:
|
||||
return ((delta.days * 24 * 3600) + delta.seconds +
|
||||
float(delta.microseconds) / (10 ** 6))
|
||||
|
||||
|
||||
def is_soon(dt, window):
|
||||
"""Determines if time is going to happen in the next window seconds.
|
||||
|
||||
:param dt: the time
|
||||
:param window: the time is considered soon if it falls within this many seconds of now
|
||||
|
||||
:return: True if expiration is within the given duration
|
||||
"""
|
||||
soon = (utcnow() + datetime.timedelta(seconds=window))
|
||||
return normalize_time(dt) <= soon
|
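The override helpers above form a small test-clock contract: set_time_override() freezes utcnow(), advance_time_seconds() moves the frozen clock forward, and clear_time_override() restores the real clock. A minimal usage sketch follows; the import path is an assumption for illustration only and is not confirmed by this retired tree.

import datetime

# Path assumed for illustration; oslo-incubator modules such as this one
# usually lived under <project>.openstack.common in stackforge projects.
from rackclient.openstack.common import timeutils

# Freeze the clock at a known instant so assertions are deterministic.
timeutils.set_time_override(datetime.datetime(2014, 1, 1, 12, 0, 0))
assert timeutils.isotime() == '2014-01-01T12:00:00Z'

# Advance the frozen clock and verify the age helpers react to it.
timeutils.advance_time_seconds(3600)
assert timeutils.is_older_than('2014-01-01T12:00:00.000000', 1800)

# Restore the real clock, typically from a test's cleanup handler.
timeutils.clear_time_override()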
@ -1,37 +0,0 @@
|
||||
# Copyright (c) 2012 Intel Corporation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
UUID related utilities and helper functions.
|
||||
"""
|
||||
|
||||
import uuid
|
||||
|
||||
|
||||
def generate_uuid():
|
||||
return str(uuid.uuid4())
|
||||
|
||||
|
||||
def is_uuid_like(val):
|
||||
"""Returns validation of a value as a UUID.
|
||||
|
||||
For our purposes, a UUID is a canonical form string:
|
||||
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
|
||||
|
||||
"""
|
||||
try:
|
||||
return str(uuid.UUID(val)) == val
|
||||
except (TypeError, ValueError, AttributeError):
|
||||
return False
|
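is_uuid_like() accepts only the canonical hyphenated form, so a bare 32-character hex string is rejected even though it names the same UUID. A short, illustrative sketch of that behaviour:

assert is_uuid_like(generate_uuid())    # canonical hyphenated form passes
assert not is_uuid_like('a' * 32)       # same hex digits without hyphens fail
assert not is_uuid_like('not-a-uuid')   # ValueError is swallowed -> False
assert not is_uuid_like(12345)          # non-string input is handled too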
@ -1,105 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
from cliff.app import App
|
||||
from cliff.commandmanager import CommandManager
|
||||
import requests
|
||||
|
||||
from rackclient import exceptions
|
||||
|
||||
VERSION = '1'
|
||||
|
||||
|
||||
class RackShell(App):
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def __init__(self):
|
||||
super(RackShell, self).__init__(
|
||||
description='rack shell',
|
||||
version=VERSION,
|
||||
command_manager=CommandManager('rack.command'),
|
||||
)
|
||||
|
||||
def build_option_parser(self, description, version,
|
||||
argparse_kwargs=None):
|
||||
parser = super(RackShell, self).build_option_parser(
|
||||
description, version, argparse_kwargs)
|
||||
|
||||
parser.add_argument(
|
||||
'--rack-api-version',
|
||||
metavar='<api-version>',
|
||||
default=os.environ.get('RACK_API_VERSION', VERSION),
|
||||
help=('Accepts only 1, '
|
||||
'defaults to env[RACK_API_VERSION].')
|
||||
)
|
||||
parser.add_argument(
|
||||
'--rack-url',
|
||||
metavar='<rack-url>',
|
||||
default=os.environ.get('RACK_URL', ''),
|
||||
help='Defaults to env[RACK_URL].'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--gid',
|
||||
metavar='<gid>',
|
||||
default=os.environ.get('RACK_GID', ''),
|
||||
help='Defaults to env[RACK_GID].'
|
||||
)
|
||||
|
||||
return parser
|
||||
|
||||
def configure_logging(self):
|
||||
super(RackShell, self).configure_logging()
|
||||
|
||||
rlogger = logging.getLogger(requests.__name__)
|
||||
rlogger.setLevel(logging.WARNING)
|
||||
|
||||
def initialize_app(self, argv):
|
||||
self.check_options()
|
||||
|
||||
def check_options(self):
|
||||
if self.options.rack_api_version != '1':
|
||||
raise exceptions.CommandError(
|
||||
"'rack-api-version' must be 1")
|
||||
|
||||
if not self.options.rack_url:
|
||||
raise exceptions.CommandError(
|
||||
"You must provide an RACK url "
|
||||
"via either --rack-url or env[RACK_URL]")
|
||||
|
||||
def prepare_to_run_command(self, cmd):
|
||||
commands = ['HelpCommand', 'ListGroups', 'ShowGroup',
|
||||
'CreateGroup', 'UpdateGroup', 'DeleteGroup',
|
||||
'InitGroup']
|
||||
if cmd.__class__.__name__ not in commands:
|
||||
if not self.options.gid:
|
||||
raise exceptions.CommandError(
|
||||
"You must provide a gid "
|
||||
"via either --gid or env[RACK_GID]")
|
||||
|
||||
def clean_up(self, cmd, result, err):
|
||||
if err:
|
||||
self.log.debug('got an error: %s', err)
|
||||
|
||||
|
||||
def main(argv=sys.argv[1:]):
|
||||
app = RackShell()
|
||||
return app.run(argv)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv[1:]))
|
@ -1 +0,0 @@
|
||||
|
@ -1,64 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class FakeClient(object):
|
||||
|
||||
def assert_called(self, method, url, body=None, pos=-1):
|
||||
"""
|
||||
Assert that an API method was just called.
|
||||
"""
|
||||
expected = (method, url)
|
||||
called = self.client.callstack[pos][0:2]
|
||||
|
||||
assert self.client.callstack, \
|
||||
"Expected %s %s but no calls were made." % expected
|
||||
|
||||
assert expected == called, \
|
||||
'Expected %s %s; got %s %s' % (expected + called)
|
||||
|
||||
if body is not None:
|
||||
if self.client.callstack[pos][2] != body:
|
||||
raise AssertionError('%r != %r' %
|
||||
(self.client.callstack[pos][2], body))
|
||||
|
||||
def assert_called_anytime(self, method, url, body=None):
|
||||
"""
|
||||
Assert that an API method was called at some point during the test.
|
||||
"""
|
||||
expected = (method, url)
|
||||
|
||||
assert self.client.callstack, \
|
||||
"Expected %s %s but no calls were made." % expected
|
||||
|
||||
found = False
|
||||
for entry in self.client.callstack:
|
||||
if expected == entry[0:2]:
|
||||
found = True
|
||||
break
|
||||
|
||||
assert found, 'Expected %s; got %s' % (expected, self.client.callstack)
|
||||
if body is not None:
|
||||
try:
|
||||
assert entry[2] == body
|
||||
except AssertionError:
|
||||
print(entry[2])
|
||||
print("!=")
|
||||
print(body)
|
||||
raise
|
||||
|
||||
self.client.callstack = []
|
||||
|
||||
def clear_callstack(self):
|
||||
self.client.callstack = []
|
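FakeClient itself only inspects self.client.callstack, a list whose entries start with (method, url, body), so any recording stand-in with that shape can drive the assertions above. A hedged sketch; the class and endpoint names below are illustrative and not part of the original tree:

class RecordingHTTPClient(object):
    """Minimal stand-in that records every call it receives."""

    def __init__(self):
        self.callstack = []

    def get(self, url):
        self.callstack.append(('GET', url, None))

    def post(self, url, body=None):
        self.callstack.append(('POST', url, body))


class FakeProcessClient(FakeClient):
    def __init__(self):
        self.client = RecordingHTTPClient()


fake = FakeProcessClient()
fake.client.post('/groups/gid1/processes', body={'name': 'proc1'})
fake.assert_called('POST', '/groups/gid1/processes')
fake.assert_called_anytime('POST', '/groups/gid1/processes',
                           body={'name': 'proc1'})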
@ -1,164 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from swiftclient import exceptions as swift_exc
|
||||
|
||||
from mock import patch
|
||||
from rackclient.tests import utils
|
||||
from rackclient.lib.syscall.default import file as rackfile
|
||||
from rackclient.exceptions import InvalidFSEndpointError
|
||||
from rackclient.exceptions import InvalidDirectoryError
|
||||
from rackclient.exceptions import InvalidFilePathError
|
||||
from rackclient.exceptions import FileSystemAccessError
|
||||
|
||||
|
||||
class FileTest(utils.LibTestCase):
|
||||
|
||||
def target_context(self):
|
||||
return "syscall.default.file"
|
||||
|
||||
def setUp(self):
|
||||
super(FileTest, self).setUp()
|
||||
patcher = patch('swiftclient.client.Connection', autospec=True)
|
||||
self.addCleanup(patcher.stop)
|
||||
self.mock_conn = patcher.start()
|
||||
self.mock_conn.return_value.get_auth.return_value = 'fake', 'fake'
|
||||
|
||||
def test_get_swift_client(self):
|
||||
self.mock_conn.return_value.get_auth.return_value = 'fake', 'fake'
|
||||
rackfile._get_swift_client()
|
||||
expected = {
|
||||
"user": "rack:admin",
|
||||
"key": "admin",
|
||||
"authurl": "http://10.0.0.2:8080/auth/v1.0"
|
||||
}
|
||||
self.mock_conn.assert_any_call(**expected)
|
||||
self.mock_conn.assert_any_call(preauthurl='fake', preauthtoken='fake')
|
||||
|
||||
def test_get_swift_client_fs_endpoint(self):
|
||||
endpoint = ('{"os_username": "user", '
|
||||
'"os_password": "password", '
|
||||
'"os_tenant_name": "tenant", '
|
||||
'"os_auth_url": "http://www.example.com:5000/v2.0"}')
|
||||
self.mock_RACK_CTX.fs_endpoint = endpoint
|
||||
rackfile._get_swift_client()
|
||||
expected = {
|
||||
"user": 'user',
|
||||
"key": 'password',
|
||||
"tenant_name": 'tenant',
|
||||
"authurl": 'http://www.example.com:5000/v2.0',
|
||||
"auth_version": "2"
|
||||
}
|
||||
self.mock_conn.assert_any_call(**expected)
|
||||
|
||||
def test_get_swift_client_invalid_fs_endpoint_error(self):
|
||||
self.mock_RACK_CTX.fs_endpoint = 'invalid'
|
||||
self.assertRaises(InvalidFSEndpointError, rackfile._get_swift_client)
|
||||
|
||||
def test_listdir(self):
|
||||
self.mock_conn.return_value.get_container.return_value = \
|
||||
None, [{'name': 'file1'}, {'name': 'file2'}]
|
||||
files = rackfile.listdir('/dir')
|
||||
|
||||
self.mock_conn.return_value.get_container.assert_called_with('dir')
|
||||
self.assertEqual('/dir/file1', files[0].path)
|
||||
self.assertEqual('/dir/file2', files[1].path)
|
||||
|
||||
def test_listdir_invalid_directory_error(self):
|
||||
self.mock_conn.return_value.get_container.side_effect = \
|
||||
swift_exc.ClientException('', http_status=404)
|
||||
self.assertRaises(InvalidDirectoryError, rackfile.listdir, 'dir')
|
||||
|
||||
def test_listdir_filesystem_error(self):
|
||||
self.mock_conn.return_value.get_container.side_effect = \
|
||||
swift_exc.ClientException('', http_status=500)
|
||||
self.assertRaises(FileSystemAccessError, rackfile.listdir, 'dir')
|
||||
|
||||
def test_file_read_mode(self):
|
||||
self.mock_conn.return_value.get_object.return_value = \
|
||||
None, 'example text'
|
||||
f = rackfile.File('/dir1/dir2/file.txt')
|
||||
f.load()
|
||||
|
||||
self.mock_conn.return_value.get_object.assert_called_with('dir1',
|
||||
'dir2/file.txt', None)
|
||||
self.assertEqual('example text', f.read())
|
||||
|
||||
f.load()
|
||||
call_count = self.mock_conn.return_value.get_object.call_count
|
||||
self.assertEqual(1, call_count)
|
||||
|
||||
f.close()
|
||||
|
||||
def test_file_read_mode_with_chunk_size(self):
|
||||
def _content():
|
||||
for i in ['11111111', '22222222']:
|
||||
yield i
|
||||
|
||||
self.mock_conn.return_value.get_object.return_value = \
|
||||
None, _content()
|
||||
f = rackfile.File('/dir1/dir2/file.txt')
|
||||
f.load(8)
|
||||
|
||||
self.mock_conn.return_value.get_object.assert_called_with('dir1',
|
||||
'dir2/file.txt', 8)
|
||||
self.assertEqual('1111111122222222', f.read())
|
||||
f.close()
|
||||
|
||||
def test_file_load_invalid_file_path_error(self):
|
||||
self.mock_conn.return_value.get_object.side_effect = \
|
||||
swift_exc.ClientException('', http_status=404)
|
||||
f = rackfile.File('/dir1/dir2/file.txt')
|
||||
self.assertRaises(InvalidFilePathError, f.load)
|
||||
|
||||
def test_file_load_filesystem_error(self):
|
||||
self.mock_conn.return_value.get_object.side_effect = \
|
||||
swift_exc.ClientException('')
|
||||
f = rackfile.File('/dir1/dir2/file.txt')
|
||||
self.assertRaises(FileSystemAccessError, f.load)
|
||||
|
||||
def test_file_write_mode(self):
|
||||
f = rackfile.File('/dir1/dir2/file.txt', mode='w')
|
||||
f.write('example text')
|
||||
f.close()
|
||||
|
||||
self.mock_conn.return_value.put_container.assert_called_with('dir1')
|
||||
self.mock_conn.return_value.put_object.assert_called_with(
|
||||
'dir1', 'dir2/file.txt', f.file)
|
||||
|
||||
def test_file_close_invalid_directory_error(self):
|
||||
self.mock_conn.return_value.put_object.side_effect = \
|
||||
swift_exc.ClientException('', http_status=404)
|
||||
f = rackfile.File('/dir1/dir2/file.txt', mode='w')
|
||||
f.write('example text')
|
||||
self.assertRaises(InvalidDirectoryError, f.close)
|
||||
|
||||
def test_file_close_invalid_filesystem_error(self):
|
||||
self.mock_conn.return_value.put_container.side_effect = \
|
||||
swift_exc.ClientException('')
|
||||
f = rackfile.File('/dir1/dir2/file.txt', mode='w')
|
||||
f.write('example text')
|
||||
self.assertRaises(FileSystemAccessError, f.close)
|
||||
|
||||
def test_file_attribute_error(self):
|
||||
f = rackfile.File('/dir1/dir2/file.txt')
|
||||
try:
|
||||
f.invalid
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
self.fail("Expected 'AttributeError'.")
|
||||
|
||||
def test_file_invalid_mode(self):
|
||||
self.assertRaises(ValueError, rackfile.File, '/dir1/dir2/file.txt',
|
||||
'invalid_mode')
|
@ -1,199 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import copy
|
||||
import cPickle
|
||||
|
||||
import pika
|
||||
|
||||
from mock import patch, Mock
|
||||
from rackclient import exceptions
|
||||
from rackclient.tests import utils
|
||||
from rackclient.lib.syscall.default import messaging as rack_ipc
|
||||
|
||||
|
||||
class MessagingTest(utils.LibTestCase):
|
||||
|
||||
def target_context(self):
|
||||
return "syscall.default.messaging"
|
||||
|
||||
def setUp(self):
|
||||
super(MessagingTest, self).setUp()
|
||||
self.mock_connection = Mock()
|
||||
self.mock_channel = Mock()
|
||||
self.mock_receive = Mock(spec=rack_ipc.Messaging.Receive)
|
||||
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
|
||||
# self.addCleanup(self.patch_pika_blocking.stop)
|
||||
self.mock_pika_blocking = self.patch_pika_blocking.start()
|
||||
self.mock_pika_blocking.return_value = self.mock_connection
|
||||
self.mock_connection.channel.return_value = self.mock_channel
|
||||
|
||||
def tearDown(self):
|
||||
super(MessagingTest, self).tearDown()
|
||||
self.patch_pika_blocking.stop()
|
||||
|
||||
def test_declare_queue(self):
|
||||
queue_name = 'test_queue_name'
|
||||
msg = rack_ipc.Messaging()
|
||||
msg.declare_queue(queue_name)
|
||||
|
||||
self.mock_channel.\
|
||||
exchange_declare.assert_called_with(exchange=self.mock_RACK_CTX.gid,
|
||||
type='topic')
|
||||
self.mock_channel.queue_declare.assert_called_with(queue=queue_name)
|
||||
r_key = self.mock_RACK_CTX.gid + '.' + queue_name
|
||||
self.mock_channel.queue_bind.assert_called_with(exchange=self.mock_RACK_CTX.gid,
|
||||
queue=queue_name,
|
||||
routing_key=r_key)
|
||||
|
||||
@patch('rackclient.lib.syscall.default.messaging.Messaging.Receive')
|
||||
def test_receive_all_msg(self, mock_receive):
|
||||
timeout_limit = 123
|
||||
msg = rack_ipc.Messaging()
|
||||
msg_list = msg.receive_all_msg(timeout_limit=timeout_limit)
|
||||
|
||||
self.mock_connection.add_timeout.\
|
||||
assert_called_with(deadline=timeout_limit,
|
||||
callback_method=mock_receive().time_out)
|
||||
self.mock_channel.\
|
||||
basic_consume.assert_called_with(mock_receive().get_all_msg,
|
||||
queue=self.mock_RACK_CTX.pid,
|
||||
no_ack=False)
|
||||
self.mock_channel.start_consuming.assert_called_with()
|
||||
self.assertEqual(msg_list, mock_receive().message_list)
|
||||
|
||||
@patch('rackclient.lib.syscall.default.messaging.Messaging.Receive')
|
||||
def test_receive_msg(self, mock_receive):
|
||||
timeout_limit = 123
|
||||
msg = rack_ipc.Messaging()
|
||||
message = msg.receive_msg(timeout_limit=timeout_limit)
|
||||
|
||||
self.mock_connection.add_timeout.\
|
||||
assert_called_with(deadline=timeout_limit,
|
||||
callback_method=mock_receive().time_out)
|
||||
self.mock_channel.\
|
||||
basic_consume.assert_called_with(mock_receive().get_msg,
|
||||
queue=self.mock_RACK_CTX.pid,
|
||||
no_ack=False)
|
||||
self.mock_channel.start_consuming.assert_called_with()
|
||||
self.assertEqual(message, mock_receive().message)
|
||||
|
||||
def test_send_msg(self):
|
||||
send_msg = 'test_msg'
|
||||
target = 'test_pid'
|
||||
msg = rack_ipc.Messaging()
|
||||
msg.send_msg(target,
|
||||
message=send_msg)
|
||||
routing_key = self.mock_RACK_CTX.gid + '.' + target
|
||||
send_dict = {'pid': self.mock_RACK_CTX.pid,
|
||||
'message': send_msg}
|
||||
send_msg = cPickle.dumps(send_dict)
|
||||
self.mock_channel.\
|
||||
basic_publish.assert_called_with(exchange=self.mock_RACK_CTX.gid,
|
||||
routing_key=routing_key,
|
||||
body=send_msg)
|
||||
|
||||
def test_send_msg_no_message(self):
|
||||
msg = rack_ipc.Messaging()
|
||||
target = 'test_pid'
|
||||
msg.send_msg(target)
|
||||
routing_key = self.mock_RACK_CTX.gid + '.' + target
|
||||
send_dict = {'pid': self.mock_RACK_CTX.pid}
|
||||
send_msg = cPickle.dumps(send_dict)
|
||||
|
||||
self.mock_channel.\
|
||||
basic_publish.assert_called_with(exchange=self.mock_RACK_CTX.gid,
|
||||
routing_key=routing_key,
|
||||
body=send_msg)
|
||||
|
||||
def test_receive_get_all_msg(self):
|
||||
ch = Mock()
|
||||
method = Mock()
|
||||
properties = Mock()
|
||||
receive_msg = 'receive_msg'
|
||||
body = cPickle.dumps(receive_msg)
|
||||
ch_object = {'delivery_tag': 'delivery_tag'}
|
||||
method.configure_mock(**ch_object)
|
||||
|
||||
msg = rack_ipc.Messaging()
|
||||
receive = msg.Receive()
|
||||
receive.get_all_msg(ch, method, properties, body)
|
||||
|
||||
ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
|
||||
self.assertEqual(receive.message_list[0], receive_msg)
|
||||
|
||||
def test_receive_get_all_msg_count_limit(self):
|
||||
ch = Mock()
|
||||
method = Mock()
|
||||
properties = Mock()
|
||||
message_list = [{'pid': 'child_pid1'},
|
||||
{'pid': 'child_pid2'}]
|
||||
expected_message_list = copy.deepcopy(message_list)
|
||||
receive_msg = {'pid': 'child_pid3'}
|
||||
expected_message_list.append(receive_msg)
|
||||
body = cPickle.dumps(receive_msg)
|
||||
ch_object = {'delivery_tag': 'delivery_tag'}
|
||||
method.configure_mock(**ch_object)
|
||||
msg = rack_ipc.Messaging()
|
||||
receive = msg.Receive()
|
||||
receive.message_list = message_list
|
||||
receive.msg_count_limit = 3
|
||||
|
||||
receive.get_all_msg(ch, method, properties, body)
|
||||
|
||||
ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
|
||||
ch.stop_consuming.assert_called_with()
|
||||
self.assertEqual(receive.message_list, expected_message_list)
|
||||
|
||||
def test_receive_get_msg(self):
|
||||
ch = Mock()
|
||||
method = Mock()
|
||||
properties = Mock()
|
||||
receive_msg = 'receive_msg'
|
||||
body = cPickle.dumps(receive_msg)
|
||||
ch_object = {'delivery_tag': 'delivery_tag'}
|
||||
method.configure_mock(**ch_object)
|
||||
|
||||
msg = rack_ipc.Messaging()
|
||||
receive = msg.Receive()
|
||||
receive.get_msg(ch, method, properties, body)
|
||||
|
||||
ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
|
||||
ch.stop_consuming.assert_called_with()
|
||||
self.assertEqual(receive.message, receive_msg)
|
||||
|
||||
def test_receive_timeout(self):
|
||||
msg = rack_ipc.Messaging()
|
||||
receive = msg.Receive()
|
||||
receive.channel = self.mock_channel
|
||||
receive.time_out()
|
||||
self.mock_channel.stop_consuming.assert_called_with()
|
||||
|
||||
def test_create_connection(self):
|
||||
p = patch('pika.ConnectionParameters', autospec=True)
|
||||
self.addCleanup(p.stop)
|
||||
mock_pika_connection_param = p.start()
|
||||
rack_ipc.Messaging()
|
||||
mock_pika_connection_param.assert_called_with(self.mock_RACK_CTX.proxy_ip)
|
||||
|
||||
@patch('pika.ConnectionParameters', autospec=True)
|
||||
def test_create_connection_ipc_endpoint(self, mock_pika_connection_param):
|
||||
ipc_ip = 'ipc_ip'
|
||||
self.mock_RACK_CTX.ipc_endpoint = ipc_ip
|
||||
rack_ipc.Messaging()
|
||||
mock_pika_connection_param.assert_called_with(ipc_ip)
|
||||
|
||||
def test_create_connection_amqp_connection_error(self):
|
||||
self.mock_pika_blocking.side_effect = pika.\
|
||||
exceptions.AMQPConnectionError()
|
||||
self.assertRaises(exceptions.AMQPConnectionError, rack_ipc.Messaging)
|
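The tests above pin down the wire format the Messaging syscall speaks: the body is a cPickle-serialized dict that always carries the sender pid and optionally a message, published to a topic exchange named after the group id with routing key '<gid>.<target pid>'. Below is a hedged sketch of just that framing, separated from pika for illustration; the helper name is not part of the original code.

import cPickle


def frame_message(gid, sender_pid, target_pid, message=None):
    """Return (exchange, routing_key, body) in the shape the tests expect."""
    payload = {'pid': sender_pid}
    if message is not None:
        payload['message'] = message
    return gid, gid + '.' + target_pid, cPickle.dumps(payload)


exchange, routing_key, body = frame_message('gid', 'parent_pid', 'child_pid',
                                            message='start')
assert exchange == 'gid'
assert routing_key == 'gid.child_pid'
assert cPickle.loads(body) == {'pid': 'parent_pid', 'message': 'start'}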
@ -1,235 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import datetime
|
||||
|
||||
from rackclient import exceptions
|
||||
from mock import patch
|
||||
from rackclient.tests import utils
|
||||
from rackclient.lib.syscall.default import pipe
|
||||
|
||||
|
||||
class PipeTest(utils.LibTestCase):
|
||||
|
||||
def target_context(self):
|
||||
return "syscall.default.pipe"
|
||||
|
||||
def setUp(self):
|
||||
super(PipeTest, self).setUp()
|
||||
patcher = patch('redis.StrictRedis')
|
||||
self.addCleanup(patcher.stop)
|
||||
self.mock_redis=patcher.start()
|
||||
self.ins_redis = self.mock_redis.return_value
|
||||
|
||||
def test_init_default(self):
|
||||
self.ins_redis.keys.return_value = "data"
|
||||
self.ins_redis.get.return_value = "parent"
|
||||
self.ins_redis.hget.side_effect = ["r","w"]
|
||||
real = pipe.Pipe()
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("parent", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertEquals("r",real.read_state)
|
||||
self.assertEquals("w", real.write_state)
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_read_write_child(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("pid", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertTrue(isinstance(real.read_state, datetime.datetime))
|
||||
self.assertTrue(isinstance(real.write_state, datetime.datetime))
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_read_write_parent(self):
|
||||
self.ins_redis.keys.return_value = "data"
|
||||
self.ins_redis.get.return_value = "parent"
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("parent", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertTrue(isinstance(real.read_state, datetime.datetime))
|
||||
self.assertTrue(isinstance(real.write_state, datetime.datetime))
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_read_write_not_none(self):
|
||||
self.ins_redis.keys.return_value = "data"
|
||||
self.ins_redis.get.return_value = "parent"
|
||||
real = pipe.Pipe(read="",write="")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("parent", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertEquals("close", real.read_state)
|
||||
self.assertEquals("close", real.write_state)
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_name(self):
|
||||
real = pipe.Pipe("test")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertTrue(real.is_named)
|
||||
self.assertEquals("test", real.name)
|
||||
self.assertTrue(isinstance(real.read_state, datetime.datetime))
|
||||
self.assertTrue(isinstance(real.write_state, datetime.datetime))
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_read(self):
|
||||
self.ins_redis.lpop.return_value = "data"
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertEquals("data", real.read())
|
||||
|
||||
def test_read_none(self):
|
||||
self.ins_redis.lpop.side_effect = [None,"data"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertEquals("data", real.read())
|
||||
|
||||
def test_read_EndOfFile(self):
|
||||
self.ins_redis.lpop.return_value = None
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertRaises(exceptions.EndOfFile, real.read)
|
||||
|
||||
def test_read_NoReadDescriptor(self):
|
||||
self.ins_redis.lpop.return_value = None
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
real = pipe.Pipe(read="", write="")
|
||||
self.assertRaises(exceptions.NoReadDescriptor, real.read)
|
||||
|
||||
def test_write(self):
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.ins_redis.hvals.return_value = []
|
||||
self.assertTrue("data", real.write("data"))
|
||||
self.assertTrue(self.ins_redis.rpush.call_count == 1)
|
||||
|
||||
def test_write_NoReadDescriptor(self):
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
self.assertRaises(exceptions.NoReadDescriptor, real.write, "data")
|
||||
self.assertTrue(self.ins_redis.rpush.call_count == 1)
|
||||
|
||||
def test_write_NoWriteDescriptor(self):
|
||||
real = pipe.Pipe(read="",write="")
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
self.assertRaises(exceptions.NoWriteDescriptor, real.write, "data")
|
||||
self.assertTrue(self.ins_redis.rpush.call_count == 0)
|
||||
|
||||
def test_close_reader(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
real.close_reader()
|
||||
self.ins_redis.hset.assert_any_call("pid_read", "pid", "close")
|
||||
|
||||
def test_close_write(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
real.close_writer()
|
||||
self.ins_redis.hset.assert_any_call("pid_write", "pid", "close")
|
||||
|
||||
def test_has_reader_no_states(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= []
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_reader())
|
||||
|
||||
def test_has_reader_states_not_close(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["open", "opne"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_reader())
|
||||
|
||||
def test_has_reader_false(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["close", "close"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertFalse(real.has_reader())
|
||||
|
||||
def test_has_writer_no_states(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= []
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_writer())
|
||||
|
||||
def test_has_writer_states_not_close(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["open", "opne"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_writer())
|
||||
|
||||
def test_has_write_false(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["close", "close"]
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.assertFalse(real.has_writer())
|
||||
|
||||
def test_flush_not_named(self):
|
||||
self.ins_redis.keys.side_effect = ["", ["abc"]]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
real.flush()
|
||||
keys = ["pid", "pid_read", "pid_write", "abc"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_flush_named(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(name="name", read="read",write="write")
|
||||
real.flush()
|
||||
keys = ["name", "name_read", "name_write"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_flush_by_pid(self):
|
||||
self.ins_redis.keys.return_value = ["abc"]
|
||||
pipe.Pipe.flush_by_pid("pid")
|
||||
keys = ["pid", "pid_read", "pid_write", "abc"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_flush_by_name(self):
|
||||
pipe.Pipe.flush_by_name("name")
|
||||
keys = ["name", "name_read", "name_write"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_share_keys_exist_states_close(self):
|
||||
self.ins_redis.keys.return_value = ["value"]
|
||||
self.ins_redis.get.return_value = "name"
|
||||
self.ins_redis.hget.side_effect = ["close", "close"]
|
||||
self.assertTrue(pipe.Pipe.share("ppid", "pid"))
|
||||
self.ins_redis.set.assert_any_call("name:pid", "name")
|
||||
self.ins_redis.hset.assert_any_call("name_read", "pid", "close")
|
||||
self.ins_redis.hset.assert_any_call("name_write", "pid", "close")
|
||||
|
||||
def test_share_keys_not_exist_states_not_close_ppid_exists(self):
|
||||
mydatetime = datetime.datetime(2015, 1, 1, 0, 0)
|
||||
class FakeDateTime(datetime.datetime):
|
||||
@classmethod
|
||||
def now(cls):
|
||||
return mydatetime
|
||||
patcher = patch("rackclient.lib.syscall.default.pipe.datetime", FakeDateTime)
|
||||
patcher.start()
self.addCleanup(patcher.stop)
|
||||
self.ins_redis.keys.side_effect = [[], ["data"]]
|
||||
self.ins_redis.hget.side_effect = ["read", "write"]
|
||||
self.assertTrue(pipe.Pipe.share("ppid", "pid"))
|
||||
self.ins_redis.set.assert_any_call("ppid:pid", "ppid")
|
||||
self.ins_redis.hset.assert_any_call("ppid_read", "pid", mydatetime)
|
||||
self.ins_redis.hset.assert_any_call("ppid_write", "pid", mydatetime)
|
||||
|
||||
def test_share_false(self):
|
||||
self.ins_redis.keys.side_effect = [[], []]
|
||||
self.assertFalse(pipe.Pipe.share("ppid", "pid"))
|
||||
|
||||
def test_NoDescriptor_str_(self):
|
||||
self.assertEquals("Descriptor Not Found", exceptions.NoDescriptor().__str__())
|
||||
|
@ -1,70 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch
|
||||
from rackclient.tests import utils
|
||||
from rackclient.lib.syscall.default import shm
|
||||
|
||||
class ShmTest(utils.LibTestCase):
|
||||
|
||||
def target_context(self):
|
||||
return "syscall.default.shm"
|
||||
|
||||
def setUp(self):
|
||||
super(ShmTest, self).setUp()
|
||||
patcher = patch('redis.StrictRedis')
|
||||
self.addCleanup(patcher.stop)
|
||||
self.mock_redis = patcher.start()
|
||||
|
||||
def test_read(self):
|
||||
ins_redis = self.mock_redis.return_value
|
||||
ins_redis.get.return_value = 'value'
|
||||
real = shm.Shm()
|
||||
self.assertEquals('value', real.read("key"))
|
||||
ins_redis.get.assert_called_once_with("key")
|
||||
|
||||
def test_write(self):
|
||||
ins_redis = self.mock_redis.return_value
|
||||
ins_redis.set.return_value = 'value'
|
||||
real = shm.Shm()
|
||||
self.assertEquals('value', real.write("key", "value"))
|
||||
ins_redis.set.assert_called_once_with("key", "value")
|
||||
|
||||
def test_list_read(self):
|
||||
ins_redis = self.mock_redis.return_value
|
||||
ins_redis.llen.return_value = 1
|
||||
ins_redis.lrange.return_value = "value"
|
||||
real = shm.Shm()
|
||||
self.assertEquals('value', real.list_read("key"))
|
||||
ins_redis.lrange.assert_called_once_with("key", 0, 1)
|
||||
|
||||
def test_list_write(self):
|
||||
ins_redis = self.mock_redis.return_value
|
||||
ins_redis.rpush.return_value = 'value'
|
||||
real = shm.Shm()
|
||||
self.assertEquals('value', real.list_write("key", "value"))
|
||||
ins_redis.rpush.assert_called_once_with("key", "value")
|
||||
|
||||
def test_list_delete_value(self):
|
||||
ins_redis = self.mock_redis.return_value
|
||||
ins_redis.lrem.return_value = 'value'
|
||||
real = shm.Shm()
|
||||
self.assertEquals('value', real.list_delete_value("key", "value"))
|
||||
ins_redis.lrem.assert_called_once_with("key", 1, "value")
|
||||
|
||||
def test_delete(self):
|
||||
ins_redis = self.mock_redis.return_value
|
||||
ins_redis.delete.return_value = 'value'
|
||||
real = shm.Shm()
|
||||
self.assertEquals('value', real.delete("key"))
|
||||
ins_redis.delete.assert_called_once_with("key")
|
@ -1,120 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import copy
|
||||
import logging
|
||||
|
||||
from mock import patch, Mock
|
||||
from rackclient.tests import utils
|
||||
from rackclient.lib.syscall.default import signal
|
||||
|
||||
|
||||
class SignalTest(utils.LibTestCase):
|
||||
|
||||
def target_context(self):
|
||||
return "syscall.default.signal"
|
||||
|
||||
def setUp(self):
|
||||
super(SignalTest, self).setUp()
|
||||
logging.basicConfig(level=logging.ERROR)
|
||||
|
||||
@patch('websocket.WebSocketApp')
|
||||
def test_receive(self, mock_websocket_websocketapp):
|
||||
mock_app = Mock()
|
||||
mock_websocket_websocketapp.return_value = mock_app
|
||||
|
||||
s = signal.SignalManager()
|
||||
on_msg_func = 'on_msg_func'
|
||||
expected_on_msg_func = copy.deepcopy(on_msg_func)
|
||||
s.receive(on_msg_func)
|
||||
|
||||
mock_websocket_websocketapp.\
|
||||
assert_called_with(url=s.url + '/receive',
|
||||
header=['PID: ' + self.mock_RACK_CTX.pid],
|
||||
on_message=s.on_message,
|
||||
on_error=s.on_error,
|
||||
on_close=s.on_close)
|
||||
mock_app.run_forever.assert_called_with()
|
||||
self.assertEqual(s.on_msg_func, expected_on_msg_func)
|
||||
|
||||
@patch('websocket.WebSocketApp')
|
||||
def test_receive_pid_specified(self, mock_websocket_websocketapp):
|
||||
mock_app = Mock()
|
||||
mock_websocket_websocketapp.return_value = mock_app
|
||||
|
||||
url = '/test_url/'
|
||||
expected_url = url.rstrip('/')
|
||||
s = signal.SignalManager(url=url)
|
||||
on_msg_func = 'on_msg_func'
|
||||
expected_on_msg_func = copy.deepcopy(on_msg_func)
|
||||
pid = 'signal_pid'
|
||||
s.receive(on_msg_func, pid=pid)
|
||||
|
||||
self.assertEqual(s.url, expected_url)
|
||||
mock_websocket_websocketapp.assert_called_with(url=s.url + '/receive',
|
||||
header=['PID: ' + pid],
|
||||
on_message=s.on_message,
|
||||
on_error=s.on_error,
|
||||
on_close=s.on_close)
|
||||
mock_app.run_forever.assert_called_with()
|
||||
self.assertEqual(s.on_msg_func, expected_on_msg_func)
|
||||
|
||||
@patch('websocket.WebSocketApp')
|
||||
def test_on_msg_func_receive_pid_specified(self, mock_websocket_websocketapp):
|
||||
mock_app = Mock()
|
||||
mock_websocket_websocketapp.return_value = mock_app
|
||||
|
||||
s = signal.SignalManager()
|
||||
on_msg_func = 'on_msg_func'
|
||||
self.mock_RACK_CTX.pid = None
|
||||
self.assertRaises(Exception, s.receive, on_msg_func)
|
||||
|
||||
def test_on_message(self):
|
||||
on_msg_func = Mock()
|
||||
ws = Mock()
|
||||
s = signal.SignalManager()
|
||||
s.on_msg_func = on_msg_func
|
||||
message = 'test_msg'
|
||||
expected_message = copy.deepcopy(message)
|
||||
s.on_message(ws, message)
|
||||
|
||||
on_msg_func.assert_called_with(expected_message)
|
||||
ws.close.assert_called_with()
|
||||
|
||||
def test_on_error(self):
|
||||
ws = Mock()
|
||||
s = signal.SignalManager()
|
||||
error = 'test_error'
|
||||
|
||||
self.assertRaises(Exception, s.on_error, ws, error)
|
||||
ws.close.assert_called_with()
|
||||
|
||||
@patch('websocket.create_connection')
|
||||
def test_send(self, mock_create_connection):
|
||||
target_id = 'target_id'
|
||||
expected_target_id = copy.deepcopy(target_id)
|
||||
message = 'test_msg'
|
||||
expected_message = copy.deepcopy(message)
|
||||
url = '/test_url/'
|
||||
expected_url = url.rstrip('/') + '/send'
|
||||
ws = Mock()
|
||||
mock_create_connection.return_value = ws
|
||||
|
||||
s = signal.SignalManager(url=url)
|
||||
s.send(target_id, message)
|
||||
|
||||
mock_create_connection.\
|
||||
assert_called_with(expected_url,
|
||||
header=['PID: ' + expected_target_id])
|
||||
ws.send.assert_called_with(expected_message)
|
||||
ws.close.assert_called_with()
|
@ -1,213 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1 import processes
|
||||
from rackclient.lib.syscall.default import messaging, file, syscall, pipe
|
||||
from mock import call
|
||||
from mock import Mock
|
||||
|
||||
|
||||
class SyscallTest(utils.LibTestCase):
|
||||
|
||||
def target_context(self):
|
||||
return "syscall.default.syscall"
|
||||
|
||||
def setUp(self):
|
||||
super(SyscallTest, self).setUp()
|
||||
|
||||
def test_fork(self):
|
||||
def create_process(gid, ppid, **kwargs):
|
||||
count = self.mock_RACK_CTX.client.processes.create.call_count
|
||||
d = {'ppid': ppid,
|
||||
'pid': 'pid' + str(count),
|
||||
'gid': gid,
|
||||
'proxy_ip': self.mock_RACK_CTX.proxy_ip}
|
||||
args = kwargs['args']
|
||||
args.update(d)
|
||||
kwargs.update(d)
|
||||
return_process = processes.Process(self.mock_RACK_CTX.client, kwargs)
|
||||
return return_process
|
||||
|
||||
self.mock_RACK_CTX.client.processes.create = Mock(side_effect=create_process)
|
||||
# messaging mock
|
||||
mock_messaging = Mock()
|
||||
messaging.Messaging = Mock(return_value=mock_messaging)
|
||||
msg_list = [{'pid': 'pid1'}, {'pid': 'pid2'}, {'pid': 'pid3'}]
|
||||
mock_messaging.receive_all_msg.return_value = msg_list
|
||||
# pipe mock
|
||||
pipe.Pipe = Mock()
|
||||
|
||||
# call fork
|
||||
arg1 = {'test': 'test1'}
|
||||
arg2 = {'test': 'test2'}
|
||||
arg3 = {'test': 'test3'}
|
||||
arg_list = [{'args': arg1},
|
||||
{'args': arg2},
|
||||
{'args': arg3}]
|
||||
process_list = syscall.fork(arg_list)
|
||||
|
||||
# check
|
||||
expected_pipe_share = [call('pid', 'pid1'),
|
||||
call('pid', 'pid2'),
|
||||
call('pid', 'pid3')]
|
||||
self.assertEqual(expected_pipe_share, pipe.Pipe.share.call_args_list)
|
||||
expected_msg_send = [call(message='start', target='pid1'),
|
||||
call(message='start', target='pid2'),
|
||||
call(message='start', target='pid3')]
|
||||
self.assertEqual(expected_msg_send,
|
||||
mock_messaging.send_msg.call_args_list)
|
||||
expected_arg_list = [arg1, arg2, arg3]
|
||||
for process in process_list:
|
||||
self.assertTrue(process.args in expected_arg_list)
|
||||
self.assertEqual(process.ppid, self.mock_RACK_CTX.pid)
|
||||
self.assertEqual(process.gid, self.mock_RACK_CTX.gid)
|
||||
expected_arg_list.remove(process.args)
|
||||
|
||||
def test_bulk_fork_check_connection_recoverable_error(self):
|
||||
# setup
|
||||
def create_process(gid, ppid, **kwargs):
|
||||
count = self.mock_RACK_CTX.client.processes.create.call_count
|
||||
if count == 2:
|
||||
raise Exception()
|
||||
d = {'ppid': ppid,
|
||||
'pid': 'pid' + str(count),
|
||||
'gid': gid,
|
||||
'proxy_ip': self.mock_RACK_CTX.proxy_ip}
|
||||
args = kwargs['args']
|
||||
args.update(d)
|
||||
kwargs.update(d)
|
||||
return_process = processes.Process(self.mock_RACK_CTX.client, kwargs)
|
||||
return return_process
|
||||
|
||||
self.mock_RACK_CTX.client.processes.create = Mock(side_effect=create_process)
|
||||
self.mock_RACK_CTX.client.processes.delete = Mock()
|
||||
|
||||
# messaging mock
|
||||
mock_messaging = Mock()
|
||||
messaging.Messaging = Mock(return_value=mock_messaging)
|
||||
msg_list = [{'pid': 'pid3'}, {'pid': 'pid4'}, {'pid': 'pid5'}]
|
||||
mock_messaging.receive_all_msg.return_value = msg_list
|
||||
|
||||
# pipe mock
|
||||
pipe.Pipe = Mock()
|
||||
|
||||
# call fork
|
||||
arg1 = {'test': 'test1'}
|
||||
arg2 = {'test': 'test2'}
|
||||
arg3 = {'test': 'test3'}
|
||||
arg_list = [{'args': arg1},
|
||||
{'args': arg2},
|
||||
{'args': arg3}]
|
||||
process_list = syscall.fork(arg_list)
|
||||
|
||||
# check
|
||||
self.mock_RACK_CTX.client.processes.delete.assert_called_with(self.mock_RACK_CTX.gid, 'pid1')
|
||||
expected_pipe_share = [call('pid', 'pid3'),
|
||||
call('pid', 'pid4'),
|
||||
call('pid', 'pid5')]
|
||||
self.assertEqual(expected_pipe_share, pipe.Pipe.share.call_args_list)
|
||||
expected_msg_send = [call(message='start', target='pid3'),
|
||||
call(message='start', target='pid4'),
|
||||
call(message='start', target='pid5')]
|
||||
self.assertEqual(expected_msg_send,
|
||||
mock_messaging.send_msg.call_args_list)
|
||||
expected_arg_list = [arg1, arg2, arg3]
|
||||
for process in process_list:
|
||||
self.assertTrue(process.args in expected_arg_list)
|
||||
self.assertEqual(process.ppid, self.mock_RACK_CTX.pid)
|
||||
self.assertEqual(process.gid, self.mock_RACK_CTX.gid)
|
||||
expected_arg_list.remove(process.args)
|
||||
|
||||
def test_bulk_fork_error_no_child_process_is_created(self):
|
||||
self.mock_RACK_CTX.client.processes.create = Mock(side_effect=Exception)
|
||||
# call fork
|
||||
arg1 = {'test': 'test1'}
|
||||
arg2 = {'test': 'test2'}
|
||||
arg3 = {'test': 'test3'}
|
||||
arg_list = [{'args': arg1},
|
||||
{'args': arg2},
|
||||
{'args': arg3}]
|
||||
self.assertRaises(Exception, syscall.fork, arg_list)
|
||||
|
||||
def test_check_connection_error_no_child_process_is_active(self):
|
||||
# setup
|
||||
def create_process(gid, ppid, **kwargs):
|
||||
count = self.mock_RACK_CTX.client.processes.create.call_count
|
||||
d = {'ppid': ppid,
|
||||
'pid': 'pid' + str(count),
|
||||
'gid': gid,
|
||||
'proxy_ip': self.mock_RACK_CTX.proxy_ip}
|
||||
args = kwargs['args']
|
||||
args.update(d)
|
||||
kwargs.update(d)
|
||||
return_process = processes.Process(self.mock_RACK_CTX.client, kwargs)
|
||||
return return_process
|
||||
|
||||
self.mock_RACK_CTX.client.processes.create = Mock(side_effect=create_process)
|
||||
self.mock_RACK_CTX.client.processes.delete = Mock()
|
||||
# messaging mock
|
||||
mock_messaging = Mock()
|
||||
messaging.Messaging = Mock(return_value=mock_messaging)
|
||||
msg_list = [{'pid': 'pid6'}]
|
||||
mock_messaging.receive_all_msg.return_value = msg_list
|
||||
|
||||
# call fork
|
||||
arg_list = [{'args': {'test': 'test1'}},
|
||||
{'args': {'test': 'test2'}},
|
||||
{'args': {'test': 'test3'}}]
|
||||
self.assertRaises(Exception, syscall.fork, arg_list)
|
||||
expected_processes_delete = [call(self.mock_RACK_CTX.gid, 'pid1'),
|
||||
call(self.mock_RACK_CTX.gid, 'pid2'),
|
||||
call(self.mock_RACK_CTX.gid, 'pid3')]
|
||||
self.assertEqual(expected_processes_delete,
|
||||
self.mock_RACK_CTX.client.processes.delete.call_args_list)
|
||||
|
||||
def test_pipe_no_arg(self):
|
||||
pipe.Pipe = Mock()
|
||||
return_value = 'pipe'
|
||||
pipe.Pipe.return_value = return_value
|
||||
|
||||
pipe_obj = syscall.pipe()
|
||||
self.assertEqual(pipe_obj, return_value)
|
||||
|
||||
def test_pipe(self):
|
||||
return_value = 'pipe'
|
||||
side_effect = lambda value: return_value + value
|
||||
pipe.Pipe = Mock(side_effect=side_effect)
|
||||
|
||||
name = 'pipe_name'
|
||||
pipe_obj = syscall.pipe(name)
|
||||
self.assertEqual(pipe_obj, return_value + name)
|
||||
|
||||
def test_fopen(self):
|
||||
file.File = Mock()
|
||||
return_value = 'file_obj'
|
||||
file.File.return_value = return_value
|
||||
|
||||
file_path = 'file_path'
|
||||
mode = 'w'
|
||||
file_obj = syscall.fopen(file_path, mode=mode)
|
||||
self.assertEqual(return_value, file_obj)
|
||||
file.File.assert_called_once_with(file_path, mode)
|
||||
|
||||
def test_fopen_no_mode(self):
|
||||
file.File = Mock()
|
||||
return_value = 'file_obj'
|
||||
file.File.return_value = return_value
|
||||
|
||||
file_path = 'file_path'
|
||||
mode = 'r'
|
||||
file_obj = syscall.fopen(file_path)
|
||||
self.assertEqual(return_value, file_obj)
|
||||
file.File.assert_called_once_with(file_path, mode)
|
@ -1,215 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import cPickle
|
||||
import json
|
||||
import logging
|
||||
|
||||
from mock import *
|
||||
from rackclient import exceptions
|
||||
from rackclient.tests import utils
|
||||
from rackclient.lib import initializing
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
LOG.setLevel(logging.WARN)
|
||||
|
||||
|
||||
class InitializingTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(InitializingTest, self).setUp()
|
||||
p = patch("requests.get")
|
||||
self.addCleanup(p.stop)
|
||||
mock_request = p.start()
|
||||
mock_resp = Mock()
|
||||
mock_resp.text= json.dumps(dict(meta=dict(
|
||||
proxy_ip="10.0.0.2",gid="gid", pid="pid", ppid="ppid")))
|
||||
mock_request.return_value = mock_resp
|
||||
|
||||
def test_get_rack_context(self):
|
||||
p = patch("rackclient.lib.initializing.Client")
|
||||
self.addCleanup(p.stop)
|
||||
mock_client = p.start()
|
||||
mock_client = mock_client.return_value
|
||||
|
||||
def proxy_info(args):
|
||||
info = type('', (object,), {})
|
||||
info.ipc_endpoint = None
|
||||
info.fs_endpoint = None
|
||||
info.shm_endpoint = None
|
||||
return info
|
||||
|
||||
mock_client.proxy = Mock()
|
||||
mock_client_processes = Mock()
|
||||
mock_client.proxy.get.side_effect = proxy_info
|
||||
p2 = patch("rackclient.lib.initializing._Messaging")
|
||||
self.addCleanup(p2.stop)
|
||||
mock_messaging = p2.start()
|
||||
mock_messaging = mock_messaging.return_value
|
||||
mock_messaging.receive_msg.return_value=dict(pid="ppid")
|
||||
actual_context = initializing.get_rack_context()
|
||||
|
||||
expect_context = type('', (object,), dict(
|
||||
proxy_ip="10.0.0.2",
|
||||
gid="gid", pid="pid",
|
||||
ppid="ppid",
|
||||
ipc_endpoint=None,
|
||||
fs_endpoint=None,
|
||||
shm_endpoint=None,
|
||||
client=mock_client))
|
||||
|
||||
self.assertEquals(expect_context.pid, actual_context.pid)
|
||||
self.assertEquals(expect_context.ppid, actual_context.ppid)
|
||||
self.assertEquals(expect_context.proxy_ip, actual_context.proxy_ip)
|
||||
self.assertEquals(expect_context.ipc_endpoint, actual_context.ipc_endpoint)
|
||||
self.assertEquals(expect_context.fs_endpoint, actual_context.fs_endpoint)
|
||||
self.assertEquals(expect_context.shm_endpoint, actual_context.shm_endpoint)
|
||||
|
||||
def test_get_rack_context_ProcessInitError_due_to_proxy(self):
|
||||
self.p = patch("rackclient.lib.initializing.Client")
|
||||
self.addCleanup(self.p.stop)
|
||||
mock_client = self.p.start()
|
||||
mock_client = mock_client.return_value
|
||||
mock_client.proxy = Mock()
|
||||
mock_client_processes = Mock()
|
||||
mock_client.proxy.get.side_effect = Exception()
|
||||
self.assertRaises(Exception, initializing.get_rack_context)
|
||||
|
||||
def test_get_rack_context_ProcessInitError_due_to_processes(self):
|
||||
self.p = patch("rackclient.lib.initializing.Client")
|
||||
self.addCleanup(self.p.stop)
|
||||
mock_client = self.p.start()
|
||||
mock_client = mock_client.return_value
|
||||
mock_client.proxy = Mock()
|
||||
mock_client_processes = Mock()
|
||||
mock_client.processes.get.side_effect = exceptions.NotFound("")
|
||||
self.assertRaises(Exception, initializing.get_rack_context)
|
||||
|
||||
@patch("rackclient.lib.initializing._Messaging.Receive")
|
||||
def test_messaging_receive_msg(self, mock_receive):
|
||||
self.mock_connection = Mock()
|
||||
self.mock_channel = Mock()
|
||||
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
|
||||
self.addCleanup(self.patch_pika_blocking.stop)
|
||||
self.mock_pika_blocking = self.patch_pika_blocking.start()
|
||||
self.mock_pika_blocking.return_value = self.mock_connection
|
||||
self.mock_connection.channel.return_value = self.mock_channel
|
||||
|
||||
context = type('', (object,), dict(
|
||||
proxy_ip="10.0.0.2",
|
||||
gid="gid", pid="pid",
|
||||
ppid="ppid",
|
||||
ipc_endpoint=None,
|
||||
fs_endpoint=None,
|
||||
shm_endpoint=None))
|
||||
|
||||
timeout_limit = 123
|
||||
msg = initializing._Messaging(context)
|
||||
message = msg.receive_msg(timeout_limit=timeout_limit)
|
||||
|
||||
self.mock_connection.add_timeout.\
|
||||
assert_called_with(deadline=int(timeout_limit),
|
||||
callback_method=mock_receive().time_out)
|
||||
self.mock_channel.\
|
||||
basic_consume.assert_called_with(mock_receive().get_msg,
|
||||
queue="pid",
|
||||
no_ack=False)
|
||||
self.mock_channel.start_consuming.assert_called_with()
|
||||
self.assertEqual(message, mock_receive().message)
|
||||
|
||||
|
||||
def test_messaging_send_msg(self):
|
||||
self.mock_connection = Mock()
|
||||
self.mock_channel = Mock()
|
||||
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
|
||||
self.addCleanup(self.patch_pika_blocking.stop)
|
||||
self.mock_pika_blocking = self.patch_pika_blocking.start()
|
||||
self.mock_pika_blocking.return_value = self.mock_connection
|
||||
self.mock_connection.channel.return_value = self.mock_channel
|
||||
|
||||
context = type('', (object,), dict(
|
||||
proxy_ip="10.0.0.2",
|
||||
gid="gid", pid="pid",
|
||||
ppid="ppid",
|
||||
ipc_endpoint=None,
|
||||
fs_endpoint=None,
|
||||
shm_endpoint=None))
|
||||
|
||||
send_msg = 'test_msg'
|
||||
target = 'test_pid'
|
||||
msg = initializing._Messaging(context)
|
||||
msg.send_msg(target)
|
||||
routing_key = context.gid + '.' + target
|
||||
send_dict = {'pid': context.pid}
|
||||
send_msg = cPickle.dumps(send_dict)
|
||||
self.mock_channel.\
|
||||
basic_publish.assert_called_with(exchange=context.gid,
|
||||
routing_key=routing_key,
|
||||
body=send_msg)
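# Illustrative sketch, not part of the original suite: the body asserted
# above is nothing more than a cPickle-serialized dict, so the wire format
# these messaging tests exercise round-trips like this.
import cPickle
payload = cPickle.dumps({'pid': 'pid'})
assert cPickle.loads(payload) == {'pid': 'pid'}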
|
||||
|
||||
def test_receive_get_msg(self):
|
||||
self.mock_connection = Mock()
|
||||
self.mock_channel = Mock()
|
||||
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
|
||||
self.addCleanup(self.patch_pika_blocking.stop)
|
||||
self.mock_pika_blocking = self.patch_pika_blocking.start()
|
||||
self.mock_pika_blocking.return_value = self.mock_connection
|
||||
self.mock_connection.channel.return_value = self.mock_channel
|
||||
|
||||
ch = Mock()
|
||||
method = Mock()
|
||||
properties = Mock()
|
||||
receive_msg = 'receive_msg'
|
||||
body = cPickle.dumps(receive_msg)
|
||||
ch_object = {'delivery_tag': 'delivery_tag'}
|
||||
method.configure_mock(**ch_object)
|
||||
|
||||
context = type('', (object,), dict(
|
||||
proxy_ip="10.0.0.2",
|
||||
gid="gid", pid="pid",
|
||||
ppid="ppid",
|
||||
ipc_endpoint=None,
|
||||
fs_endpoint=None,
|
||||
shm_endpoint=None))
|
||||
|
||||
msg = initializing._Messaging(context)
|
||||
receive = msg.Receive()
|
||||
receive.get_msg(ch, method, properties, body)
|
||||
|
||||
ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
|
||||
ch.stop_consuming.assert_called_with()
|
||||
self.assertEqual(receive.message, receive_msg)
|
||||
|
||||
def test_receive_timeout(self):
|
||||
self.mock_connection = Mock()
|
||||
self.mock_channel = Mock()
|
||||
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
|
||||
self.addCleanup(self.patch_pika_blocking.stop)
|
||||
self.mock_pika_blocking = self.patch_pika_blocking.start()
|
||||
self.mock_pika_blocking.return_value = self.mock_connection
|
||||
self.mock_connection.channel.return_value = self.mock_channel
|
||||
|
||||
context = type('', (object,), dict(
|
||||
proxy_ip="10.0.0.2",
|
||||
gid="gid", pid="pid",
|
||||
ppid="ppid",
|
||||
ipc_endpoint="data",
|
||||
fs_endpoint=None,
|
||||
shm_endpoint=None))
|
||||
|
||||
msg = initializing._Messaging(context)
|
||||
receive = msg.Receive()
|
||||
receive.channel = self.mock_channel
|
||||
receive.time_out()
|
||||
self.mock_channel.stop_consuming.assert_called_with()
|
@ -1,129 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import json
|
||||
import logging
|
||||
import mock
|
||||
import fixtures
|
||||
import requests
|
||||
from rackclient import client
|
||||
from rackclient.tests import utils
|
||||
|
||||
|
||||
class ClientTest(utils.TestCase):
|
||||
|
||||
def test_log_req(self):
|
||||
self.logger = self.useFixture(
|
||||
fixtures.FakeLogger(
|
||||
name=client.__name__,
|
||||
format="%(message)s",
|
||||
level=logging.DEBUG,
|
||||
nuke_handlers=True
|
||||
)
|
||||
)
|
||||
cs = client.HTTPClient('rack_url', True)
|
||||
|
||||
cs.http_log_req('GET', '/foo', {'headers': {}})
|
||||
cs.http_log_req('GET', '/foo', {'headers': {
|
||||
'Content-Type': 'application/json',
|
||||
'User-Agent': 'python-rackclient'
|
||||
}})
|
||||
|
||||
data = {'group': {
|
||||
'name': 'group1',
|
||||
'description': 'This is group1'
|
||||
}}
|
||||
cs.http_log_req('POST', '/foo', {
|
||||
'headers': {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
'data': json.dumps(data)
|
||||
})
|
||||
|
||||
output = self.logger.output.split('\n')
|
||||
self.assertIn("REQ: curl -i '/foo' -X GET", output)
|
||||
self.assertIn("REQ: curl -i '/foo' -X GET "
|
||||
'-H "Content-Type: application/json" '
|
||||
'-H "User-Agent: python-rackclient"',
|
||||
output)
|
||||
self.assertIn("REQ: curl -i '/foo' -X POST "
|
||||
'-H "Content-Type: application/json" '
|
||||
'-d \'{"group": {"name": "group1", '
|
||||
'"description": "This is group1"}}\'',
|
||||
output)
|
||||
|
||||
def test_log_resp(self):
|
||||
self.logger = self.useFixture(
|
||||
fixtures.FakeLogger(
|
||||
name=client.__name__,
|
||||
format="%(message)s",
|
||||
level=logging.DEBUG,
|
||||
nuke_handlers=True
|
||||
)
|
||||
)
|
||||
cs = client.HTTPClient('rack_url', True)
|
||||
|
||||
text = '{"group": {"name": "group1", "description": "This is group1"}}'
|
||||
resp = utils.TestResponse({'status_code': 200, 'headers': {},
|
||||
'text': text})
|
||||
cs.http_log_resp(resp)
|
||||
|
||||
output = self.logger.output.split('\n')
|
||||
self.assertIn("RESP: [200] {}", output)
|
||||
self.assertIn('RESP BODY: {"group": {"name": "group1", '
|
||||
'"description": "This is group1"}}', output)
|
||||
|
||||
def test_request(self):
|
||||
cs = client.HTTPClient('http://www.foo.com', False)
|
||||
data = (
|
||||
'{"group": { "gid": "11111111",'
|
||||
'"user_id": "4ffc664c198e435e9853f253lkbcd7a7",'
|
||||
'"project_id": "9sac664c198e435e9853f253lkbcd7a7",'
|
||||
'"name": "group1",'
|
||||
'"description": "This is group1",'
|
||||
'"status": "ACTIVE"}}'
|
||||
)
|
||||
|
||||
mock_request = mock.Mock()
|
||||
mock_request.return_value = requests.Response()
|
||||
mock_request.return_value.status_code = 201
|
||||
mock_request.return_value._content = data
|
||||
|
||||
with mock.patch('requests.request', mock_request):
|
||||
resp, body = cs.post('/groups', body=data)
|
||||
kwargs = {
|
||||
'headers': {
|
||||
'User-Agent': 'python-rackclient',
|
||||
'Accept': 'application/json',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
'data': json.dumps(data)
|
||||
}
|
||||
mock_request.assert_called_with('POST',
|
||||
'http://www.foo.com/groups',
|
||||
**kwargs)
|
||||
|
||||
def test_request_raise_exception(self):
|
||||
cs = client.HTTPClient('http://www.foo.com', False)
|
||||
|
||||
mock_request = mock.Mock()
|
||||
mock_request.return_value = requests.Response()
|
||||
mock_request.return_value.status_code = 404
|
||||
|
||||
mock_exec = mock.Mock()
|
||||
mock_exec.return_value = Exception('Not Found')
|
||||
|
||||
with mock.patch('requests.request', mock_request):
|
||||
with mock.patch('rackclient.exceptions.from_response',
|
||||
mock_exec):
|
||||
self.assertRaises(Exception, cs.get, '/groups')
|
@ -1,75 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from fixtures import fixture
|
||||
import requests
|
||||
import testtools
|
||||
from rackclient import client
|
||||
from mock import patch
|
||||
|
||||
|
||||
class TestCase(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestCase, self).setUp()
|
||||
|
||||
|
||||
class LibTestCase(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LibTestCase, self).setUp()
|
||||
patcher = patch(
|
||||
"rackclient.lib." + self.target_context() + ".RACK_CTX")
|
||||
self.addCleanup(patcher.stop)
|
||||
self.mock_RACK_CTX = patcher.start()
|
||||
self._init_context()
|
||||
|
||||
def _init_context(self):
|
||||
self.mock_RACK_CTX.gid = "gid"
|
||||
self.mock_RACK_CTX.pid = "pid"
|
||||
self.mock_RACK_CTX.ppid = None
|
||||
self.mock_RACK_CTX.proxy_ip = "10.0.0.2"
|
||||
self.mock_RACK_CTX.proxy_url = 'http://10.0.0.2:8088/v1'
|
||||
self.mock_RACK_CTX.client = \
|
||||
client.Client('1', rack_url=self.mock_RACK_CTX.proxy_url)
|
||||
self.mock_RACK_CTX.fs_endpoint = None
|
||||
self.mock_RACK_CTX.ipc_endpoint = None
|
||||
self.mock_RACK_CTX.shm_endpoint = None
|
||||
|
||||
def target_context(self):
|
||||
pass
|
||||
|
||||
|
||||
class TestResponse(requests.Response):
|
||||
"""
|
||||
Class used to wrap requests.Response and provide a
|
||||
convenient way to initialize it from a dict
|
||||
"""
|
||||
|
||||
def __init__(self, data):
|
||||
super(TestResponse, self).__init__()
|
||||
self._text = None
|
||||
if isinstance(data, dict):
|
||||
self.status_code = data.get('status_code')
|
||||
self.headers = data.get('headers')
|
||||
# Fake the text attribute to streamline Response creation
|
||||
self._text = data.get('text')
|
||||
else:
|
||||
self.status_code = data
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.__dict__ == other.__dict__
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
return self._text
|
@ -1,359 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import urlparse
|
||||
from rackclient import client as base_client
|
||||
from rackclient.tests import fakes
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1 import client
|
||||
|
||||
|
||||
class FakeClient(fakes.FakeClient, client.Client):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
client.Client.__init__(self, 'rack_url', 'http_log_debug')
|
||||
self.client = FakeHTTPClient()
|
||||
|
||||
|
||||
class FakeHTTPClient(base_client.HTTPClient):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.rack_url = 'rack_url'
|
||||
self.http_log_debug = 'http_log_debug'
|
||||
self.callstack = []
|
||||
|
||||
def request(self, url, method, **kwargs):
|
||||
if method in ['GET', 'DELETE']:
|
||||
assert 'body' not in kwargs
|
||||
elif method == 'PUT':
|
||||
assert 'body' in kwargs
|
||||
|
||||
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
|
||||
kwargs.update(args)
|
||||
munged_url = url.rsplit('?', 1)[0]
|
||||
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
|
||||
munged_url = munged_url.replace('-', '_')
|
||||
munged_url = munged_url.replace(' ', '_')
|
||||
|
||||
callback = "%s_%s" % (method.lower(), munged_url)
|
||||
|
||||
if not hasattr(self, callback):
|
||||
raise AssertionError('Called unknown API method: %s %s, '
|
||||
'expected fakes method name: %s' %
|
||||
(method, url, callback))
|
||||
|
||||
self.callstack.append((method, url, kwargs.get('body')))
|
||||
|
||||
status, headers, body = getattr(self, callback)(**kwargs)
|
||||
r = utils.TestResponse({
|
||||
"status_code": status,
|
||||
"text": body,
|
||||
"headers": headers,
|
||||
})
|
||||
return r, body
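# Illustrative sketch (the helper name below is hypothetical, not part of
# the fakes): this repeats the URL munging request() performs above with a
# concrete input, so the mapping to fake method names is easy to follow.
def _fake_callback_name(method, url):
    munged = url.rsplit('?', 1)[0]
    munged = munged.strip('/').replace('/', '_').replace('.', '_')
    munged = munged.replace('-', '_').replace(' ', '_')
    return "%s_%s" % (method.lower(), munged)

assert _fake_callback_name('GET', '/groups/11111111/keypairs?detail=true') == \
    'get_groups_11111111_keypairs'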
|
||||
|
||||
#
|
||||
# groups
|
||||
#
|
||||
|
||||
def get_groups(self, **kw):
|
||||
groups = {'groups': [
|
||||
{
|
||||
'gid': '11111111',
|
||||
"user_id": '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
"project_id": '9sac664c198e435e9853f253lkbcd7a7',
|
||||
"name": 'group1',
|
||||
"description": 'This is group1',
|
||||
"status": 'ACTIVE'
|
||||
},
|
||||
{
|
||||
'gid': '22222222',
|
||||
"user_id": '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
"project_id": '9sac664c198e435e9853f253lkbcd7a7',
|
||||
"name": 'group2',
|
||||
"description": 'This is group2',
|
||||
"status": 'ACTIVE'
|
||||
}
|
||||
]}
|
||||
return (200, {}, groups)
|
||||
|
||||
def get_groups_11111111(self, **kw):
|
||||
group = {'group': self.get_groups()[2]["groups"][0]}
|
||||
return (200, {}, group)
|
||||
|
||||
def post_groups(self, body, **kw):
|
||||
group = {'group': self.get_groups()[2]["groups"][0]}
|
||||
return (201, {}, group)
|
||||
|
||||
def put_groups_11111111(self, body, **kw):
|
||||
group = {'group': self.get_groups()[2]["groups"][0]}
|
||||
return (200, {}, group)
|
||||
|
||||
def delete_groups_11111111(self, **kw):
|
||||
return (204, {}, None)
|
||||
|
||||
#
|
||||
# keypairs
|
||||
#
|
||||
|
||||
def get_groups_11111111_keypairs(self, **kw):
|
||||
keypairs = {'keypairs': [
|
||||
{
|
||||
'keypair_id': 'aaaaaaaa',
|
||||
'nova_keypair_id': 'keypair1',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'name': 'keypair1',
|
||||
'private_key': '1234',
|
||||
'is_default': True,
|
||||
'status': 'Exist'
|
||||
},
|
||||
{
|
||||
'keypair_id': 'bbbbbbbb',
|
||||
'nova_keypair_id': 'keypair2',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'name': 'keypair2',
|
||||
'private_key': '5678',
|
||||
'is_default': False,
|
||||
'status': 'Exist'
|
||||
}
|
||||
]}
|
||||
return (200, {}, keypairs)
|
||||
|
||||
def get_groups_11111111_keypairs_aaaaaaaa(self, **kw):
|
||||
keypair = {'keypair': self.get_groups_11111111_keypairs()[2]['keypairs'][0]}
|
||||
return (200, {}, keypair)
|
||||
|
||||
def post_groups_11111111_keypairs(self, body, **kw):
|
||||
keypair = {'keypair': self.get_groups_11111111_keypairs()[2]['keypairs'][0]}
|
||||
return (201, {}, keypair)
|
||||
|
||||
def put_groups_11111111_keypairs_aaaaaaaa(self, body, **kw):
|
||||
keypair = {'keypair': self.get_groups_11111111_keypairs()[2]['keypairs'][0]}
|
||||
return (200, {}, keypair)
|
||||
|
||||
def delete_groups_11111111_keypairs_aaaaaaaa(self, **kw):
|
||||
return (204, {}, None)
|
||||
|
||||
#
|
||||
# securitygroups
|
||||
#
|
||||
|
||||
def get_groups_11111111_securitygroups(self, **kw):
|
||||
securitygroups = {'securitygroups': [
|
||||
{
|
||||
'securitygroup_id': 'aaaaaaaa',
|
||||
'neutron_securitygroup_id': 'pppppppp',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'name': 'securitygroup1',
|
||||
'is_default': True,
|
||||
'status': 'Exist'
|
||||
},
|
||||
{
|
||||
'securitygroup_id': 'bbbbbbbb',
|
||||
'neutron_securitygroup_id': 'qqqqqqqq',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'name': 'securitygroup2',
|
||||
'is_default': False,
|
||||
'status': 'Exist'
|
||||
}
|
||||
]}
|
||||
return (200, {}, securitygroups)
|
||||
|
||||
def get_groups_11111111_securitygroups_aaaaaaaa(self, **kw):
|
||||
securitygroup = {'securitygroup': self.get_groups_11111111_securitygroups()[2]['securitygroups'][0]}
|
||||
return (200, {}, securitygroup)
|
||||
|
||||
def post_groups_11111111_securitygroups(self, body, **kw):
|
||||
securitygroup = {'securitygroup': self.get_groups_11111111_securitygroups()[2]['securitygroups'][0]}
|
||||
return (201, {}, securitygroup)
|
||||
|
||||
def put_groups_11111111_securitygroups_aaaaaaaa(self, body, **kw):
|
||||
securitygroup = {'securitygroup': self.get_groups_11111111_securitygroups()[2]['securitygroups'][0]}
|
||||
return (200, {}, securitygroup)
|
||||
|
||||
def delete_groups_11111111_securitygroups_aaaaaaaa(self, **kw):
|
||||
return (204, {}, None)
|
||||
|
||||
#
|
||||
# networks
|
||||
#
|
||||
|
||||
def get_groups_11111111_networks(self, **kw):
|
||||
networks = {'networks': [
|
||||
{
|
||||
'network_id': 'aaaaaaaa',
|
||||
'neutron_network_id': 'pppppppp',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'name': 'network1',
|
||||
'is_admin': True,
|
||||
'ext_router_id': 'rrrrrrrr',
|
||||
'status': 'Exist'
|
||||
},
|
||||
{
|
||||
'network_id': 'bbbbbbbb',
|
||||
'neutron_network_id': 'qqqqqqqq',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'name': 'network2',
|
||||
'is_admin': False,
|
||||
'ext_router_id': 'rrrrrrrr',
|
||||
'status': 'Exist'
|
||||
}
|
||||
]}
|
||||
return (200, {}, networks)
|
||||
|
||||
def get_groups_11111111_networks_aaaaaaaa(self, **kw):
|
||||
network = {'network': self.get_groups_11111111_networks()[2]['networks'][0]}
|
||||
return (200, {}, network)
|
||||
|
||||
def post_groups_11111111_networks(self, body, **kw):
|
||||
network = {'network': self.get_groups_11111111_networks()[2]['networks'][0]}
|
||||
return (201, {}, network)
|
||||
|
||||
def delete_groups_11111111_networks_aaaaaaaa(self, **kw):
|
||||
return (204, {}, None)
|
||||
|
||||
#
|
||||
# processes
|
||||
#
|
||||
|
||||
def get_groups_11111111_processes(self, **kw):
|
||||
processes = {'processes': [
|
||||
{
|
||||
'nova_instance_id': 'pppppppp',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'pid': 'aaaaaaaa',
|
||||
'ppid': None,
|
||||
'name': 'process1',
|
||||
'glance_image_id': 'xxxxxxxx',
|
||||
'nova_flavor_id': 'yyyyyyyy',
|
||||
'keypair_id': 'iiiiiiii',
|
||||
'securitygroup_ids': [
|
||||
'jjjjjjjj', 'kkkkkkkk'
|
||||
],
|
||||
'networks': [
|
||||
{'network_id': 'mmmmmmmm',
|
||||
'fixed': '10.0.0.2',
|
||||
'floating': '1.1.1.1'}
|
||||
],
|
||||
'app_status': 'ACTIVE',
|
||||
'userdata': 'IyEvYmluL3NoICBlY2hvICJIZWxsbyI=',
|
||||
'status': 'ACTIVE',
|
||||
'args': {
|
||||
'key1': 'value1',
|
||||
'key2': 'value2'
|
||||
}
|
||||
},
|
||||
{
|
||||
'process_id': 'bbbbbbbb',
|
||||
'nova_instance_id': 'qqqqqqqq',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'pid': 'bbbbbbbb',
|
||||
'ppid': 'aaaaaaaa',
|
||||
'name': 'process2',
|
||||
'glance_image_id': 'xxxxxxxx',
|
||||
'nova_flavor_id': 'yyyyyyyy',
|
||||
'keypair_id': 'iiiiiiii',
|
||||
'securitygroup_ids': [
|
||||
'jjjjjjjj', 'kkkkkkkk'
|
||||
],
|
||||
'networks': [
|
||||
{'network_id': 'mmmmmmmm',
|
||||
'fixed': '10.0.0.3',
|
||||
'floating': '2.2.2.2'}
|
||||
],
|
||||
'app_status': 'ACTIVE',
|
||||
'userdata': 'IyEvYmluL3NoICBlY2hvICJIZWxsbyI=',
|
||||
'status': 'ACTIVE',
|
||||
'args': {
|
||||
'key1': 'value1',
|
||||
'key2': 'value2'
|
||||
}
|
||||
}
|
||||
]}
|
||||
return (200, {}, processes)
|
||||
|
||||
def get_groups_11111111_processes_aaaaaaaa(self, **kw):
|
||||
process = {'process': self.get_groups_11111111_processes()[2]['processes'][0]}
|
||||
return (200, {}, process)
|
||||
|
||||
def post_groups_11111111_processes(self, body, **kw):
|
||||
process = {'process': self.get_groups_11111111_processes()[2]['processes'][0]}
|
||||
return (202, {}, process)
|
||||
|
||||
def put_groups_11111111_processes_aaaaaaaa(self, body, **kw):
|
||||
process = {'process': self.get_groups_11111111_processes()[2]['processes'][0]}
|
||||
return (200, {}, process)
|
||||
|
||||
def delete_groups_11111111_processes_aaaaaaaa(self, **kw):
|
||||
return (204, {}, None)
|
||||
|
||||
#
|
||||
# proxy
|
||||
#
|
||||
|
||||
def get_groups_11111111_proxy(self, **kw):
|
||||
proxy = {'proxy': {
|
||||
'nova_instance_id': 'pppppppp',
|
||||
'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
|
||||
'project_id': '9sac664c198e435e9853f253lkbcd7a7',
|
||||
'gid': '11111111',
|
||||
'pid': 'aaaaaaaa',
|
||||
'ppid': None,
|
||||
'name': 'proxy',
|
||||
'glance_image_id': 'xxxxxxxx',
|
||||
'nova_flavor_id': 'yyyyyyyy',
|
||||
'keypair_id': 'iiiiiiii',
|
||||
'securitygroup_ids': [
|
||||
'jjjjjjjj', 'kkkkkkkk'
|
||||
],
|
||||
'networks': [
|
||||
{'network_id': 'mmmmmmmm',
|
||||
'fixed': '10.0.0.2',
|
||||
'floating': '1.1.1.1'}
|
||||
],
|
||||
'app_status': 'ACTIVE',
|
||||
'userdata': 'IyEvYmluL3NoICBlY2hvICJIZWxsbyI=',
|
||||
'status': 'ACTIVE',
|
||||
'args': {
|
||||
'key1': 'value1',
|
||||
'key2': 'value2'
|
||||
},
|
||||
'ipc_endpoint': 'ipc_endpoint',
|
||||
'shm_endpoint': 'shm_endpoint',
|
||||
'fs_endpoint': 'fs_endpoint'
|
||||
}}
|
||||
return (200, {}, proxy)
|
||||
|
||||
def post_groups_11111111_proxy(self, body, **kw):
|
||||
proxy = {'proxy': self.get_groups_11111111_proxy()[2]['proxy']}
|
||||
return (202, {}, proxy)
|
||||
|
||||
def put_groups_11111111_proxy(self, body, **kw):
|
||||
proxy = {'proxy': self.get_groups_11111111_proxy()[2]['proxy']}
|
||||
return (200, {}, proxy)
|
@ -1,2 +0,0 @@
|
||||
key1=value1
|
||||
key2=value2
|
@ -1,70 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import groups
|
||||
|
||||
|
||||
class GroupsTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(GroupsTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.group_type = groups.Group
|
||||
|
||||
def test_list(self):
|
||||
groups = self.cs.groups.list()
|
||||
self.cs.assert_called('GET', '/groups')
|
||||
for group in groups:
|
||||
self.assertIsInstance(group, self.group_type)
|
||||
|
||||
def test_get(self):
|
||||
group = self.cs.groups.get('11111111')
|
||||
self.cs.assert_called('GET', '/groups/11111111')
|
||||
self.assertEqual('11111111', group.gid)
|
||||
self.assertEqual('4ffc664c198e435e9853f253lkbcd7a7', group.user_id)
|
||||
self.assertEqual('9sac664c198e435e9853f253lkbcd7a7', group.project_id)
|
||||
self.assertEqual('group1', group.name)
|
||||
self.assertEqual('This is group1', group.description)
|
||||
self.assertEqual('ACTIVE', group.status)
|
||||
|
||||
def _create_body(self, name, description):
|
||||
return {
|
||||
'group': {
|
||||
'name': name,
|
||||
'description': description
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
name = 'group1'
|
||||
description = 'This is group1'
|
||||
group = self.cs.groups.create(name, description)
|
||||
body = self._create_body(name, description)
|
||||
self.cs.assert_called('POST', '/groups', body)
|
||||
self.assertIsInstance(group, self.group_type)
|
||||
|
||||
def test_update(self):
|
||||
gid = '11111111'
|
||||
name = 'group1'
|
||||
description = 'This is group1'
|
||||
group = self.cs.groups.update(gid, name, description)
|
||||
body = self._create_body(name, description)
|
||||
self.cs.assert_called('PUT', '/groups/11111111', body)
|
||||
self.assertIsInstance(group, self.group_type)
|
||||
|
||||
def test_delete(self):
|
||||
gid = '11111111'
|
||||
self.cs.groups.delete(gid)
|
||||
self.cs.assert_called('DELETE', '/groups/11111111')
|
@ -1,2 +0,0 @@
|
||||
key1=value1
|
||||
key2:value2
|
@ -1,97 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import keypairs
|
||||
|
||||
|
||||
class KeypairsTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(KeypairsTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.keypair_type = keypairs.Keypair
|
||||
self.gid = '11111111'
|
||||
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
|
||||
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
|
||||
|
||||
def test_list(self):
|
||||
keypairs = self.cs.keypairs.list(self.gid)
|
||||
self.cs.assert_called('GET', '/groups/%s/keypairs' % self.gid)
|
||||
for keypair in keypairs:
|
||||
self.assertIsInstance(keypair, self.keypair_type)
|
||||
|
||||
def test_get(self):
|
||||
keypair_id = 'aaaaaaaa'
|
||||
keypair = self.cs.keypairs.get(self.gid, keypair_id)
|
||||
self.cs.assert_called('GET', '/groups/%s/keypairs/%s' % (self.gid, keypair_id))
|
||||
self.assertEqual(self.gid, keypair.gid)
|
||||
self.assertEqual(self.user_id, keypair.user_id)
|
||||
self.assertEqual(self.project_id, keypair.project_id)
|
||||
self.assertEqual(keypair_id, keypair.keypair_id)
|
||||
self.assertEqual('keypair1', keypair.nova_keypair_id)
|
||||
self.assertEqual('keypair1', keypair.name)
|
||||
self.assertEqual('1234', keypair.private_key)
|
||||
self.assertEqual(True, keypair.is_default)
|
||||
self.assertEqual('Exist', keypair.status)
|
||||
|
||||
def _create_body(self, name, is_default):
|
||||
return {
|
||||
'keypair': {
|
||||
'name': name,
|
||||
'is_default': is_default
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
name = 'keypair1'
|
||||
is_default = True
|
||||
keypair = self.cs.keypairs.create(self.gid, name, is_default)
|
||||
body = self._create_body(name, is_default)
|
||||
self.cs.assert_called('POST', '/groups/%s/keypairs' % self.gid, body)
|
||||
self.assertIsInstance(keypair, self.keypair_type)
|
||||
|
||||
def test_create_invalid_parameters(self):
|
||||
name = 'keypair1'
|
||||
is_default = 'invalid'
|
||||
self.assertRaises(exc.CommandError, self.cs.keypairs.create,
|
||||
self.gid, name, is_default)
|
||||
|
||||
def _update_body(self, is_default):
|
||||
return {
|
||||
'keypair': {
|
||||
'is_default': is_default
|
||||
}
|
||||
}
|
||||
|
||||
def test_update(self):
|
||||
is_default = True
|
||||
keypair_id = 'aaaaaaaa'
|
||||
keypair = self.cs.keypairs.update(self.gid,
|
||||
keypair_id, is_default)
|
||||
body = self._update_body(is_default)
|
||||
self.cs.assert_called('PUT', '/groups/%s/keypairs/%s' % (self.gid, keypair_id), body)
|
||||
self.assertIsInstance(keypair, self.keypair_type)
|
||||
|
||||
def test_update_invalid_parameters(self):
|
||||
is_default = 'invalid'
|
||||
keypair_id = 'aaaaaaaa'
|
||||
self.assertRaises(exc.CommandError, self.cs.keypairs.update,
|
||||
self.gid, keypair_id, is_default)
|
||||
|
||||
def test_delete(self):
|
||||
keypair_id = 'aaaaaaaa'
|
||||
self.cs.keypairs.delete(self.gid, keypair_id)
|
||||
self.cs.assert_called('DELETE', '/groups/%s/keypairs/%s' % (self.gid, keypair_id))
|
@ -1,101 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import networks
|
||||
|
||||
|
||||
class NetworksTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(NetworksTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.network_type = networks.Network
|
||||
self.gid = '11111111'
|
||||
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
|
||||
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
|
||||
|
||||
def test_list(self):
|
||||
networks = self.cs.networks.list(self.gid)
|
||||
self.cs.assert_called('GET', '/groups/%s/networks' % self.gid)
|
||||
for network in networks:
|
||||
self.assertIsInstance(network, self.network_type)
|
||||
|
||||
def test_get(self):
|
||||
network_id = 'aaaaaaaa'
|
||||
network = self.cs.networks.get(self.gid, network_id)
|
||||
self.cs.assert_called('GET', '/groups/%s/networks/%s' % (self.gid, network_id))
|
||||
self.assertEqual(self.gid, network.gid)
|
||||
self.assertEqual(self.user_id, network.user_id)
|
||||
self.assertEqual(self.project_id, network.project_id)
|
||||
self.assertEqual(network_id, network.network_id)
|
||||
self.assertEqual('pppppppp', network.neutron_network_id)
|
||||
self.assertEqual('network1', network.name)
|
||||
self.assertEqual(True, network.is_admin)
|
||||
self.assertEqual('rrrrrrrr', network.ext_router_id)
|
||||
self.assertEqual('Exist', network.status)
|
||||
|
||||
def _create_body(self, cidr, name=None, is_admin=False, gateway=None, dns_nameservers=None, ext_router_id=None):
|
||||
return {
|
||||
'network': {
|
||||
'cidr': cidr,
|
||||
'name': name,
|
||||
'is_admin': is_admin,
|
||||
'gateway': gateway,
|
||||
'dns_nameservers': dns_nameservers,
|
||||
'ext_router_id': ext_router_id
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
cidr = '10.0.0.0/24'
|
||||
name = 'network1'
|
||||
is_admin = True
|
||||
dns_nameservers = ['8.8.8.8', '8.8.4.4']
|
||||
gateway = '10.0.0.254'
|
||||
ext_router_id = 'rrrrrrrr'
|
||||
|
||||
network = self.cs.networks.create(
|
||||
self.gid, cidr, name, is_admin, gateway,
|
||||
dns_nameservers, ext_router_id)
|
||||
body = self._create_body(
|
||||
cidr, name, is_admin, gateway,
|
||||
dns_nameservers, ext_router_id)
|
||||
self.cs.assert_called('POST', '/groups/%s/networks' % self.gid, body)
|
||||
self.assertIsInstance(network, self.network_type)
|
||||
|
||||
def test_create_invalid_parameters(self):
|
||||
name = 'network1'
|
||||
ext_router_id = 'rrrrrrrr'
|
||||
self.assertRaises(exc.CommandError, self.cs.networks.create,
|
||||
self.gid, 'invalid', name, True, '10.0.0.254',
|
||||
['8.8.8.8', '8.8.4.4'], ext_router_id)
|
||||
self.assertRaises(exc.CommandError, self.cs.networks.create,
|
||||
self.gid, '10.0.0.0', name, True, '10.0.0.254',
|
||||
['8.8.8.8', '8.8.4.4'], ext_router_id)
|
||||
self.assertRaises(exc.CommandError, self.cs.networks.create,
|
||||
self.gid, '10.0.0.0/24', name, 'invalid', '10.0.0.254',
|
||||
['8.8.8.8', '8.8.4.4'], ext_router_id)
|
||||
self.assertRaises(exc.CommandError, self.cs.networks.create,
|
||||
self.gid, '10.0.0.0/24', name, True, 'invalid',
|
||||
['8.8.8.8', '8.8.4.4'], ext_router_id)
|
||||
self.assertRaises(exc.CommandError, self.cs.networks.create,
|
||||
self.gid, '10.0.0.0/24', name, True, '10.0.0.254',
|
||||
{}, ext_router_id)
|
||||
|
||||
def test_delete(self):
|
||||
network_id = 'aaaaaaaa'
|
||||
self.cs.networks.delete(self.gid, network_id)
|
||||
self.cs.assert_called('DELETE', '/groups/%s/networks/%s' % (self.gid, network_id))
|
@ -1,135 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import base64
|
||||
import tempfile
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import processes
|
||||
|
||||
|
||||
class ProcessesTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ProcessesTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.process_type = processes.Process
|
||||
self.gid = '11111111'
|
||||
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
|
||||
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
|
||||
|
||||
def test_list(self):
|
||||
processes = self.cs.processes.list(self.gid)
|
||||
self.cs.assert_called('GET', '/groups/%s/processes' % self.gid)
|
||||
for process in processes:
|
||||
self.assertIsInstance(process, self.process_type)
|
||||
|
||||
def test_get(self):
|
||||
pid = 'aaaaaaaa'
|
||||
process = self.cs.processes.get(self.gid, pid)
|
||||
self.cs.assert_called('GET', '/groups/%s/processes/%s' % (self.gid, pid))
|
||||
self.assertEqual(self.gid, process.gid)
|
||||
self.assertEqual(self.user_id, process.user_id)
|
||||
self.assertEqual(self.project_id, process.project_id)
|
||||
self.assertEqual(pid, process.pid)
|
||||
self.assertEqual(None, process.ppid)
|
||||
self.assertEqual('pppppppp', process.nova_instance_id)
|
||||
self.assertEqual('process1', process.name)
|
||||
self.assertEqual('xxxxxxxx', process.glance_image_id)
|
||||
self.assertEqual('yyyyyyyy', process.nova_flavor_id)
|
||||
self.assertEqual('iiiiiiii', process.keypair_id)
|
||||
self.assertEqual(['jjjjjjjj', 'kkkkkkkk'], process.securitygroup_ids)
|
||||
networks = [{
|
||||
'network_id': 'mmmmmmmm',
|
||||
'fixed': '10.0.0.2',
|
||||
'floating': '1.1.1.1'
|
||||
}]
|
||||
self.assertEqual(networks, process.networks)
|
||||
self.assertEqual('ACTIVE', process.app_status)
|
||||
self.assertEqual('ACTIVE', process.status)
|
||||
self.assertEqual('IyEvYmluL3NoICBlY2hvICJIZWxsbyI=', process.userdata)
|
||||
args = {
|
||||
'key1': 'value1',
|
||||
'key2': 'value2'
|
||||
}
|
||||
self.assertEqual(args, process.args)
|
||||
|
||||
|
||||
def _create_body(self, ppid=None, name=None, nova_flavor_id=None,
|
||||
glance_image_id=None, keypair_id=None,
|
||||
securitygroup_ids=None, userdata=None, args=None):
|
||||
return {
|
||||
'process': {
|
||||
'ppid': ppid,
|
||||
'name': name,
|
||||
'nova_flavor_id': nova_flavor_id,
|
||||
'glance_image_id': glance_image_id,
|
||||
'keypair_id': keypair_id,
|
||||
'securitygroup_ids': securitygroup_ids,
|
||||
'userdata': userdata,
|
||||
'args': args
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
userdata = '#!/bin/sh echo "Hello"'
|
||||
f = tempfile.TemporaryFile()
|
||||
f.write(userdata)
|
||||
f.seek(0)
|
||||
params = {
|
||||
'ppid': '11111111',
|
||||
'name': 'process1',
|
||||
'nova_flavor_id': 1,
|
||||
'glance_image_id': '22222222',
|
||||
'keypair_id': '33333333',
|
||||
'securitygroup_ids': ['44444444', '55555555'],
|
||||
'userdata': f,
|
||||
'args': {
|
||||
"key1": "value1",
|
||||
"key2": "value2"
|
||||
}
|
||||
}
|
||||
process = self.cs.processes.create(self.gid, **params)
|
||||
body = self._create_body(**params)
|
||||
body['process']['userdata'] = base64.b64encode(userdata)
|
||||
self.cs.assert_called('POST', '/groups/%s/processes' % self.gid, body)
|
||||
self.assertIsInstance(process, self.process_type)
|
||||
|
||||
def test_create_invalid_parameters(self):
|
||||
ppid = 'aaaaaaaa'
|
||||
self.assertRaises(exc.CommandError, self.cs.processes.create,
|
||||
self.gid, ppid=ppid, securitygroup_ids='invalid')
|
||||
self.assertRaises(exc.CommandError, self.cs.processes.create,
|
||||
self.gid, ppid=ppid, args='invalid')
|
||||
|
||||
def _update_body(self, app_status):
|
||||
return {
|
||||
'process': {
|
||||
'app_status': app_status
|
||||
}
|
||||
}
|
||||
|
||||
def test_update(self):
|
||||
app_status = 'ACTIVE'
|
||||
pid = 'aaaaaaaa'
|
||||
process = self.cs.processes.update(self.gid,
|
||||
pid, app_status)
|
||||
body = self._update_body(app_status)
|
||||
self.cs.assert_called('PUT', '/groups/%s/processes/%s' % (self.gid, pid), body)
|
||||
self.assertIsInstance(process, self.process_type)
|
||||
|
||||
def test_delete(self):
|
||||
pid = 'aaaaaaaa'
|
||||
self.cs.processes.delete(self.gid, pid)
|
||||
self.cs.assert_called('DELETE', '/groups/%s/processes/%s' % (self.gid, pid))
|
@ -1,130 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import base64
|
||||
import tempfile
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import proxy
|
||||
|
||||
|
||||
class ProxyTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ProxyTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.proxy_type = proxy.Proxy
|
||||
self.gid = '11111111'
|
||||
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
|
||||
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
|
||||
|
||||
def test_get(self):
|
||||
proxy = self.cs.proxy.get(self.gid)
|
||||
self.cs.assert_called('GET', '/groups/%s/proxy' % self.gid)
|
||||
self.assertEqual(self.gid, proxy.gid)
|
||||
self.assertEqual(self.user_id, proxy.user_id)
|
||||
self.assertEqual(self.project_id, proxy.project_id)
|
||||
self.assertEqual(None, proxy.ppid)
|
||||
self.assertEqual('pppppppp', proxy.nova_instance_id)
|
||||
self.assertEqual('proxy', proxy.name)
|
||||
self.assertEqual('xxxxxxxx', proxy.glance_image_id)
|
||||
self.assertEqual('yyyyyyyy', proxy.nova_flavor_id)
|
||||
self.assertEqual('iiiiiiii', proxy.keypair_id)
|
||||
self.assertEqual(['jjjjjjjj', 'kkkkkkkk'], proxy.securitygroup_ids)
|
||||
networks = [{
|
||||
'network_id': 'mmmmmmmm',
|
||||
'fixed': '10.0.0.2',
|
||||
'floating': '1.1.1.1'
|
||||
}]
|
||||
self.assertEqual(networks, proxy.networks)
|
||||
self.assertEqual('ACTIVE', proxy.app_status)
|
||||
self.assertEqual('ACTIVE', proxy.status)
|
||||
self.assertEqual('IyEvYmluL3NoICBlY2hvICJIZWxsbyI=', proxy.userdata)
|
||||
args = {
|
||||
'key1': 'value1',
|
||||
'key2': 'value2'
|
||||
}
|
||||
self.assertEqual(args, proxy.args)
|
||||
self.assertEqual('ipc_endpoint', proxy.ipc_endpoint)
|
||||
self.assertEqual('shm_endpoint', proxy.shm_endpoint)
|
||||
self.assertEqual('fs_endpoint', proxy.fs_endpoint)
|
||||
|
||||
|
||||
def _create_body(self, name=None, nova_flavor_id=None,
|
||||
glance_image_id=None, keypair_id=None,
|
||||
securitygroup_ids=None, userdata=None, args=None):
|
||||
return {
|
||||
'proxy': {
|
||||
'name': name,
|
||||
'nova_flavor_id': nova_flavor_id,
|
||||
'glance_image_id': glance_image_id,
|
||||
'keypair_id': keypair_id,
|
||||
'securitygroup_ids': securitygroup_ids,
|
||||
'userdata': userdata,
|
||||
'args': args
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
userdata = '#!/bin/sh echo "Hello"'
|
||||
f = tempfile.TemporaryFile()
|
||||
f.write(userdata)
|
||||
f.seek(0)
|
||||
params = {
|
||||
'name': 'proxy',
|
||||
'nova_flavor_id': 1,
|
||||
'glance_image_id': '22222222',
|
||||
'keypair_id': '33333333',
|
||||
'securitygroup_ids': ['44444444', '55555555'],
|
||||
'userdata': f,
|
||||
'args': {
|
||||
"key1": "value1",
|
||||
"key2": "value2"
|
||||
}
|
||||
}
|
||||
proxy = self.cs.proxy.create(self.gid, **params)
|
||||
body = self._create_body(**params)
|
||||
body['proxy']['userdata'] = base64.b64encode(userdata)
|
||||
self.cs.assert_called('POST', '/groups/%s/proxy' % self.gid, body)
|
||||
self.assertIsInstance(proxy, self.proxy_type)
|
||||
|
||||
def test_create_invalid_parameters(self):
|
||||
self.assertRaises(exc.CommandError, self.cs.proxy.create,
|
||||
self.gid, securitygroup_ids='invalid')
|
||||
self.assertRaises(exc.CommandError, self.cs.proxy.create,
|
||||
self.gid, args='invalid')
|
||||
|
||||
def _update_body(self, ipc_endpoint=None, shm_endpoint=None,
|
||||
fs_endpoint=None, app_status=None):
|
||||
return {
|
||||
'proxy': {
|
||||
'ipc_endpoint': ipc_endpoint,
|
||||
'shm_endpoint': shm_endpoint,
|
||||
'fs_endpoint': fs_endpoint,
|
||||
'app_status': app_status
|
||||
}
|
||||
}
|
||||
|
||||
def test_update(self):
|
||||
ipc_endpoint = 'ipc_endpoint'
|
||||
shm_endpoint = 'shm_endpoint'
|
||||
fs_endpoint = 'fs_endpoint'
|
||||
app_status = 'ACTIVE'
|
||||
proxy = self.cs.proxy.update(self.gid, shm_endpoint,
|
||||
ipc_endpoint, fs_endpoint,
|
||||
app_status)
|
||||
body = self._update_body(ipc_endpoint, shm_endpoint,
|
||||
fs_endpoint, app_status)
|
||||
self.cs.assert_called('PUT', '/groups/%s/proxy' % self.gid, body)
|
||||
self.assertIsInstance(proxy, self.proxy_type)
|
@ -1,111 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import securitygroups
|
||||
|
||||
|
||||
class SecuritygroupsTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(SecuritygroupsTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.securitygroup_type = securitygroups.Securitygroup
|
||||
self.gid = '11111111'
|
||||
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
|
||||
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
|
||||
|
||||
def test_list(self):
|
||||
securitygroups = self.cs.securitygroups.list(self.gid)
|
||||
self.cs.assert_called('GET', '/groups/%s/securitygroups' % self.gid)
|
||||
for securitygroup in securitygroups:
|
||||
self.assertIsInstance(securitygroup, self.securitygroup_type)
|
||||
|
||||
def test_get(self):
|
||||
securitygroup_id = 'aaaaaaaa'
|
||||
securitygroup = self.cs.securitygroups.get(self.gid, securitygroup_id)
|
||||
self.cs.assert_called('GET', '/groups/%s/securitygroups/%s' % (self.gid, securitygroup_id))
|
||||
self.assertEqual(self.gid, securitygroup.gid)
|
||||
self.assertEqual(self.user_id, securitygroup.user_id)
|
||||
self.assertEqual(self.project_id, securitygroup.project_id)
|
||||
self.assertEqual(securitygroup_id, securitygroup.securitygroup_id)
|
||||
self.assertEqual('pppppppp', securitygroup.neutron_securitygroup_id)
|
||||
self.assertEqual('securitygroup1', securitygroup.name)
|
||||
self.assertEqual(True, securitygroup.is_default)
|
||||
self.assertEqual('Exist', securitygroup.status)
|
||||
|
||||
def _create_body(self, name, is_default, rules):
|
||||
return {
|
||||
'securitygroup': {
|
||||
'name': name,
|
||||
'is_default': is_default,
|
||||
'securitygrouprules': rules
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
name = 'securitygroup1'
|
||||
is_default = True
|
||||
rules = [{
|
||||
'protocol': 'tcp',
|
||||
'port_range_max': '80',
|
||||
'port_range_min': '80',
|
||||
'remote_ip_prefix': '0.0.0.0/0'
|
||||
}]
|
||||
securitygroup = self.cs.securitygroups.create(self.gid, name, is_default, rules)
|
||||
body = self._create_body(name, is_default, rules)
|
||||
self.cs.assert_called('POST', '/groups/%s/securitygroups' % self.gid, body)
|
||||
self.assertIsInstance(securitygroup, self.securitygroup_type)
|
||||
|
||||
def test_create_invalid_parameters(self):
|
||||
name = 'securitygroup1'
|
||||
rules = [{
|
||||
'protocol': 'tcp',
|
||||
'port_range_max': '80',
|
||||
'port_range_min': '80',
|
||||
'remote_ip_prefix': '0.0.0.0/0'
|
||||
}]
|
||||
self.assertRaises(exc.CommandError, self.cs.securitygroups.create,
|
||||
self.gid, name, 'invalid', rules)
|
||||
|
||||
rules = {}
|
||||
self.assertRaises(exc.CommandError, self.cs.securitygroups.create,
|
||||
self.gid, name, True, rules)
|
||||
|
||||
def _update_body(self, is_default):
|
||||
return {
|
||||
'securitygroup': {
|
||||
'is_default': is_default
|
||||
}
|
||||
}
|
||||
|
||||
def test_update(self):
|
||||
is_default = True
|
||||
securitygroup_id = 'aaaaaaaa'
|
||||
securitygroup = self.cs.securitygroups.update(self.gid, securitygroup_id, is_default)
|
||||
body = self._update_body(is_default)
|
||||
self.cs.assert_called('PUT', '/groups/%s/securitygroups/%s' % (self.gid, securitygroup_id), body)
|
||||
self.assertIsInstance(securitygroup, self.securitygroup_type)
|
||||
|
||||
def test_update_invalid_parameters(self):
|
||||
is_default = 'invalid'
|
||||
securitygroup_id = 'aaaaaaaa'
|
||||
self.assertRaises(exc.CommandError, self.cs.securitygroups.update,
|
||||
self.gid, securitygroup_id, is_default)
|
||||
|
||||
def test_delete(self):
|
||||
securitygroup_id = 'aaaaaaaa'
|
||||
self.cs.securitygroups.delete(self.gid, securitygroup_id)
|
||||
self.cs.assert_called('DELETE', '/groups/%s/securitygroups/%s' % (self.gid, securitygroup_id))
|
@ -1,2 +0,0 @@
|
||||
#!/bin/sh
|
||||
echo Hello
|
@ -1,34 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
|
||||
def keyvalue_to_dict(string):
|
||||
"""
|
||||
Return a dict made from comma separated key-value strings
|
||||
|
||||
:param string: comma separated key-value pairs
|
||||
like 'key1=value1,key2=value2'
|
||||
:return: dict
|
||||
"""
|
||||
try:
|
||||
d = {}
|
||||
pairs = string.split(',')
|
||||
for pair in pairs:
|
||||
(k, v) = pair.split('=', 1)
|
||||
d.update({k: v})
|
||||
return d
|
||||
except ValueError:
|
||||
msg = "%r is not in the format of key=value" % string
|
||||
raise argparse.ArgumentTypeError(msg)
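# Illustrative usage, not part of the original module, assuming the helper
# is wired in as an argparse 'type' callable (the ArgumentTypeError raised
# above is exactly what argparse expects from such a callable):
_parser = argparse.ArgumentParser()
_parser.add_argument('--args', type=keyvalue_to_dict)
_ns = _parser.parse_args(['--args', 'key1=value1,key2=value2'])
assert _ns.args == {'key1': 'value1', 'key2': 'value2'}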
|
@ -1,58 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.openstack.common.apiclient import base
|
||||
|
||||
Resource = base.Resource
|
||||
|
||||
|
||||
class Manager(object):
|
||||
|
||||
resource_class = None
|
||||
|
||||
def __init__(self, api):
|
||||
self.api = api
|
||||
|
||||
def _list(self, url, response_key, obj_class=None):
|
||||
_resp, body = self.api.client.get(url)
|
||||
|
||||
if obj_class is None:
|
||||
obj_class = self.resource_class
|
||||
|
||||
data = body[response_key]
|
||||
|
||||
objs = []
|
||||
for res in data:
|
||||
if res:
|
||||
obj = obj_class(self, res, loaded=True)
|
||||
objs.append(obj)
|
||||
|
||||
return objs
|
||||
|
||||
def _get(self, url, response_key):
|
||||
_resp, body = self.api.client.get(url)
|
||||
obj = self.resource_class(self, body[response_key], loaded=True)
|
||||
return obj
|
||||
|
||||
def _create(self, url, body, response_key):
|
||||
_resp, body = self.api.client.post(url, body=body)
|
||||
obj = self.resource_class(self, body[response_key])
|
||||
return obj
|
||||
|
||||
def _delete(self, url):
|
||||
_resp, _body = self.api.client.delete(url)
|
||||
|
||||
def _update(self, url, body, response_key):
|
||||
_resp, body = self.api.client.put(url, body=body)
|
||||
if body:
|
||||
return self.resource_class(self, body[response_key])
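# A hedged sketch of how a concrete manager builds on the helpers above.
# The URL layout and response keys mirror the fakes used in the tests, but
# this subclass is illustrative only, not the project's real GroupManager.
class ExampleGroupManager(Manager):
    resource_class = Resource

    def list(self):
        return self._list('/groups', 'groups')

    def get(self, gid):
        return self._get('/groups/%s' % gid, 'group')

    def delete(self, gid):
        self._delete('/groups/%s' % gid)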
|
@ -1,48 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.client import HTTPClient
|
||||
from rackclient.v1.groups import GroupManager
|
||||
from rackclient.v1.keypairs import KeypairManager
|
||||
from rackclient.v1.networks import NetworkManager
|
||||
from rackclient.v1.processes import ProcessManager
|
||||
from rackclient.v1.proxy import ProxyManager
|
||||
from rackclient.v1.securitygroups import SecuritygroupManager
|
||||
|
||||
|
||||
class Client(object):
|
||||
"""
|
||||
Top-level object to access the RACK API.
|
||||
|
||||
Create a rackclient instance::
|
||||
|
||||
>>> from rackclient.v1 import client
|
||||
>>> client = client.Client()
|
||||
|
||||
Then call methods on its managers::
|
||||
|
||||
>>> client.processes.list()
|
||||
...
|
||||
>>> client.groups.list()
|
||||
...
|
||||
"""
|
||||
def __init__(self, rack_url=None, http_log_debug=False):
|
||||
self.rack_url = rack_url
|
||||
self.http_log_debug = http_log_debug
|
||||
self.groups = GroupManager(self)
|
||||
self.keypairs = KeypairManager(self)
|
||||
self.securitygroups = SecuritygroupManager(self)
|
||||
self.networks = NetworkManager(self)
|
||||
self.processes = ProcessManager(self)
|
||||
self.proxy = ProxyManager(self)
|
||||
self.client = HTTPClient(rack_url, http_log_debug=http_log_debug)
|
@ -1,361 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from ConfigParser import ConfigParser
|
||||
from ConfigParser import NoOptionError
|
||||
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
from rackclient import exceptions
|
||||
from rackclient import utils
|
||||
|
||||
|
||||
def _make_print_data(gid, name, description, user_id, project_id,
|
||||
status, keypairs=None, securitygroups=None,
|
||||
networks=None, proxy=None, processes=None):
|
||||
columns = ['gid', 'name', 'description', 'user_id', 'project_id', 'status']
|
||||
data = [gid, name, description, user_id, project_id, status]
|
||||
|
||||
if keypairs is not None:
|
||||
columns.append('keypairs')
|
||||
data.append(keypairs)
|
||||
|
||||
if securitygroups is not None:
|
||||
columns.append('securitygroups')
|
||||
data.append(securitygroups)
|
||||
|
||||
if networks is not None:
|
||||
columns.append('networks')
|
||||
data.append(networks)
|
||||
|
||||
if proxy is not None:
|
||||
columns.append('proxy')
|
||||
data.append(proxy)
|
||||
|
||||
if processes is not None:
|
||||
columns.append('processes')
|
||||
data.append(processes)
|
||||
|
||||
return columns, data
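# For reference (an illustrative call, values borrowed from the test
# fixtures): with only some optional fields supplied, the helper returns a
# matching pair of column names and row values for cliff to render.
_columns, _data = _make_print_data(
    '11111111', 'group1', 'This is group1',
    '4ffc664c198e435e9853f253lkbcd7a7',
    '9sac664c198e435e9853f253lkbcd7a7',
    'ACTIVE', keypairs='aaaaaaaa', networks='bbbbbbbb')
assert _columns == ['gid', 'name', 'description', 'user_id', 'project_id',
                    'status', 'keypairs', 'networks']
assert _data == ['11111111', 'group1', 'This is group1',
                 '4ffc664c198e435e9853f253lkbcd7a7',
                 '9sac664c198e435e9853f253lkbcd7a7',
                 'ACTIVE', 'aaaaaaaa', 'bbbbbbbb']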
|
||||
|
||||
|
||||
class ListGroups(Lister):
|
||||
"""
|
||||
Print a list of all groups.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ListGroups, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
groups = self.client.groups.list()
|
||||
return (
|
||||
('gid', 'name', 'description', 'status'),
|
||||
((g.gid, g.name, g.description, g.status) for g in groups)
|
||||
)
|
||||
|
||||
|
||||
class ShowGroup(ShowOne):
|
||||
"""
|
||||
Show details about the given group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ShowGroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(ShowGroup, self).get_parser(prog_name)
|
||||
parser.add_argument('gid', metavar='<gid>',
|
||||
default=os.environ.get('RACK_GID'),
|
||||
help="Group id")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
group = self.client.groups.get(parsed_args.gid)
|
||||
keypairs = self.client.keypairs.list(parsed_args.gid)
|
||||
securitygroups = self.client.securitygroups.list(parsed_args.gid)
|
||||
networks = self.client.networks.list(parsed_args.gid)
|
||||
processes = self.client.processes.list(parsed_args.gid)
|
||||
try:
|
||||
proxy = self.client.proxy.get(parsed_args.gid)
|
||||
except Exception:
|
||||
proxy = None
|
||||
|
||||
return _make_print_data(
|
||||
group.gid,
|
||||
group.name,
|
||||
group.description,
|
||||
group.user_id,
|
||||
group.project_id,
|
||||
group.status,
|
||||
','.join([k.keypair_id for k in keypairs]),
|
||||
','.join([s.securitygroup_id for s in securitygroups]),
|
||||
','.join([n.network_id for n in networks]),
|
||||
proxy.pid if proxy else '',
','.join([p.pid for p in processes])
)
|
||||
|
||||
|
||||
class CreateGroup(ShowOne):
|
||||
"""
|
||||
Create a new group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(CreateGroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(CreateGroup, self).get_parser(prog_name)
|
||||
parser.add_argument('name', metavar='<name>',
|
||||
help="Name of the new group")
|
||||
parser.add_argument('--description', metavar='<description>',
|
||||
help="Details of the new group")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
group = self.client.groups.create(
|
||||
parsed_args.name,
|
||||
parsed_args.description)
|
||||
return _make_print_data(
|
||||
group.gid,
|
||||
group.name,
|
||||
group.description,
|
||||
group.user_id,
|
||||
group.project_id,
|
||||
group.status
|
||||
)
|
||||
|
||||
|
||||
class UpdateGroup(ShowOne):
|
||||
"""
|
||||
Update the specified group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(UpdateGroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(UpdateGroup, self).get_parser(prog_name)
|
||||
parser.add_argument('gid', metavar='<gid>',
|
||||
help="Group id")
|
||||
parser.add_argument('--name', metavar='<name>',
|
||||
help="Name of the group")
|
||||
parser.add_argument('--description', metavar='<description>',
|
||||
help="Details of the group")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
group = self.client.groups.update(parsed_args.gid,
|
||||
parsed_args.name,
|
||||
parsed_args.description)
|
||||
return _make_print_data(
|
||||
group.gid,
|
||||
group.name,
|
||||
group.description,
|
||||
group.user_id,
|
||||
group.project_id,
|
||||
group.status
|
||||
)
|
||||
|
||||
|
||||
class DeleteGroup(Command):
|
||||
"""
|
||||
Delete the specified group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(DeleteGroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(DeleteGroup, self).get_parser(prog_name)
|
||||
parser.add_argument('gid', metavar='<gid>',
|
||||
help="Group id")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
self.client.groups.delete(parsed_args.gid)
|
||||
|
||||
|
||||
class InitGroup(ShowOne):
|
||||
"""
|
||||
Create a group, a keypair, a security group, a network and
|
||||
a rack-proxy based on the specified configuration file.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(InitGroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(InitGroup, self).get_parser(prog_name)
|
||||
parser.add_argument('config', metavar='<config-file>',
|
||||
help=("Configuration file including parameters"
|
||||
" of the new group"))
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
config = ConfigParser()
|
||||
config.read(parsed_args.config)
|
||||
|
||||
group_description = None
|
||||
keypair_name = None
|
||||
keypair_is_default = True
|
||||
securitygroup_name = None
|
||||
securitygroup_is_default = True
|
||||
securitygroup_rules = None
|
||||
network_name = None
|
||||
network_is_admin = True
|
||||
network_gateway_ip = None
|
||||
network_dns_nameservers = []
|
||||
proxy_name = None
|
||||
|
||||
# Required options
|
||||
try:
|
||||
group_name = config.get('group', 'name')
|
||||
network_cidr = config.get('network', 'cidr')
|
||||
network_ext_router_id = config.get('network', 'ext_router_id')
|
||||
proxy_flavor = config.get('proxy', 'nova_flavor_id')
|
||||
proxy_image = config.get('proxy', 'glance_image_id')
|
||||
except NoOptionError as e:
|
||||
msg = "%s in %s section is required." % (e.option, e.section)
|
||||
raise exceptions.CommandError(msg)
|
||||
|
||||
try:
|
||||
securitygroup_rules = config.get('securitygroup', 'rules').split()
|
||||
securitygroup_rules = \
|
||||
[utils.keyvalue_to_dict(r) for r in securitygroup_rules]
|
||||
except argparse.ArgumentTypeError:
|
||||
raise exceptions.CommandError(
|
||||
"securitygroup rules are not valid formart")
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
group_description = config.get('group', 'description')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
keypair_name = config.get('keypair', 'name')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
keypair_is_default = config.get('keypair', 'is_default')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
securitygroup_name = config.get('securitygroup', 'name')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
securitygroup_is_default = config.get('securitygroup',
|
||||
'is_default')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
network_name = config.get('network', 'name')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
network_is_admin = config.get('network', 'is_admin')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
network_gateway_ip = config.get('network', 'gateway_ip')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
network_dns_nameservers = config.get(
|
||||
'network',
|
||||
'dns_nameservers').split()
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
try:
|
||||
proxy_name = config.get('proxy', 'name')
|
||||
except NoOptionError:
|
||||
pass
|
||||
|
||||
group = self.client.groups.create(group_name, group_description)
|
||||
keypair = self.client.keypairs.create(group.gid, keypair_name,
|
||||
keypair_is_default)
|
||||
securitygroup = self.client.securitygroups.create(
|
||||
group.gid,
|
||||
securitygroup_name,
|
||||
securitygroup_is_default,
|
||||
securitygroup_rules)
|
||||
network = self.client.networks.create(
|
||||
group.gid, network_cidr, network_name,
|
||||
network_is_admin, network_gateway_ip,
|
||||
network_dns_nameservers,
|
||||
network_ext_router_id)
|
||||
proxy = self.client.proxy.create(
|
||||
group.gid, name=proxy_name,
|
||||
nova_flavor_id=proxy_flavor,
|
||||
glance_image_id=proxy_image,
|
||||
keypair_id=keypair.keypair_id,
|
||||
securitygroup_ids=[securitygroup.securitygroup_id])
|
||||
|
||||
columns = ['gid', 'keypair_id', 'securitygroup_id', 'network_id',
|
||||
'proxy_pid', 'proxy_name']
|
||||
data = [group.gid, keypair.keypair_id, securitygroup.securitygroup_id,
|
||||
network.network_id, proxy.pid, proxy.name]
|
||||
|
||||
return columns, data
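# Illustrative sketch (not part of the original module): the kind of configuration
# file InitGroup expects. Section and option names come from the config.get()
# calls above; every value below is a placeholder.
#
#   [group]
#   name = example-group
#   description = demo group
#
#   [network]
#   cidr = 10.0.0.0/24
#   ext_router_id = <neutron-router-uuid>
#   dns_nameservers = 8.8.8.8 8.8.4.4
#
#   [securitygroup]
#   rules = protocol=tcp,port_range_min=22,port_range_max=22,remote_ip_prefix=0.0.0.0/0
#
#   [proxy]
#   nova_flavor_id = <nova-flavor-id>
#   glance_image_id = <glance-image-uuid>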
|
@ -1,199 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
|
||||
|
||||
def _make_print_data(keypair_id, name, nova_keypair_id, is_default, private_key,
|
||||
gid, user_id, project_id, status=None):
|
||||
columns = ['keypair_id', 'name', 'nova_keypair_id', 'is_default',
|
||||
'private_key', 'gid', 'user_id', 'project_id']
|
||||
data = [keypair_id, name, nova_keypair_id, is_default,
|
||||
private_key, gid, user_id, project_id]
|
||||
|
||||
if status is not None:
|
||||
columns.append('status')
|
||||
data.append(status)
|
||||
|
||||
return columns, data
|
||||
|
||||
|
||||
class ListKeypairs(Lister):
|
||||
"""
|
||||
Print a list of all keypairs in the specified group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ListKeypairs, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
keypairs = self.client.keypairs.list(self.gid)
|
||||
return (
|
||||
('keypair_id', 'name', 'is_default', 'status'),
|
||||
((k.keypair_id, k.name, k.is_default, k.status) for k in keypairs)
|
||||
)
|
||||
|
||||
|
||||
class ShowKeypair(ShowOne):
|
||||
"""
|
||||
Show details about the given keypair.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ShowKeypair, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(ShowKeypair, self).get_parser(prog_name)
|
||||
parser.add_argument('keypair_id', metavar='<keypair-id>',
|
||||
help="Keypair ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
keypair = self.client.keypairs.get(self.gid,
|
||||
parsed_args.keypair_id)
|
||||
return _make_print_data(
|
||||
keypair.keypair_id,
|
||||
keypair.name,
|
||||
keypair.nova_keypair_id,
|
||||
keypair.is_default,
|
||||
keypair.private_key,
|
||||
keypair.gid,
|
||||
keypair.user_id,
|
||||
keypair.project_id,
|
||||
keypair.status
|
||||
)
|
||||
|
||||
|
||||
class CreateKeypair(ShowOne):
|
||||
"""
|
||||
Create a new keypair.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(CreateKeypair, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(CreateKeypair, self).get_parser(prog_name)
|
||||
parser.add_argument('--name', metavar='<name>',
|
||||
help="Name of the new keypair")
|
||||
parser.add_argument('--is-default', metavar='<true/false>',
|
||||
default=False,
|
||||
help=("Defaults to the default keypair of"
|
||||
" the group"))
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
keypair = self.client.keypairs.create(self.gid, parsed_args.name,
|
||||
parsed_args.is_default)
|
||||
return _make_print_data(
|
||||
keypair.keypair_id,
|
||||
keypair.name,
|
||||
keypair.nova_keypair_id,
|
||||
keypair.is_default,
|
||||
keypair.private_key,
|
||||
keypair.gid,
|
||||
keypair.user_id,
|
||||
keypair.project_id,
|
||||
)
|
||||
|
||||
|
||||
class UpdateKeypair(ShowOne):
|
||||
"""
|
||||
Update the specified keypair.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(UpdateKeypair, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(UpdateKeypair, self).get_parser(prog_name)
|
||||
parser.add_argument('keypair_id', metavar='<keypair-id>',
|
||||
help="Keypair ID")
|
||||
parser.add_argument('--is-default', metavar='<true/false>',
|
||||
required=True,
|
||||
help="Defaults to the default keypair of the group")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
keypair = self.client.keypairs.update(self.gid,
|
||||
parsed_args.keypair_id,
|
||||
parsed_args.is_default)
|
||||
return _make_print_data(
|
||||
keypair.keypair_id,
|
||||
keypair.name,
|
||||
keypair.nova_keypair_id,
|
||||
keypair.is_default,
|
||||
keypair.private_key,
|
||||
keypair.gid,
|
||||
keypair.user_id,
|
||||
keypair.project_id,
|
||||
)
|
||||
|
||||
|
||||
class DeleteKeypair(Command):
|
||||
"""
|
||||
Delete the specified keypair.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(DeleteKeypair, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(DeleteKeypair, self).get_parser(prog_name)
|
||||
parser.add_argument('keypair_id', metavar='<keypair-id>',
|
||||
help="Keypair ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
self.client.keypairs.delete(self.gid, parsed_args.keypair_id)
|
@ -1,99 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
|
||||
|
||||
class Montecarlo(ShowOne):
|
||||
"""
|
||||
An application that approximates pi
with the Monte Carlo method.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(Montecarlo, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(Montecarlo, self).get_parser(prog_name)
|
||||
|
||||
parser.add_argument('--msg_limit_time', metavar='<integer>',
|
||||
default=300, type=int,
|
||||
help="Parent waits for notifications of "
|
||||
"preparation completion from children "
|
||||
"until the timer reaches to msg_limit_time "
|
||||
"seconds")
|
||||
parser.add_argument('--noterm', metavar='<true/false>',
|
||||
default=False,
|
||||
help="(Intended for debugging) "
|
||||
"If true, all processes won't be deleted")
|
||||
parser.add_argument('--image', metavar='<image-id>',
|
||||
required=True,
|
||||
help="(Required) ID of the montecarlo image")
|
||||
parser.add_argument('--flavor', metavar='<flavor-id>',
|
||||
required=True,
|
||||
help="(Required) ID of a flavor")
|
||||
parser.add_argument('--trials', metavar='<integer>',
|
||||
required=True, type=int,
|
||||
help="(Required) The number of trials in a "
|
||||
"simulation")
|
||||
parser.add_argument('--workers', metavar='<integer>',
|
||||
required=True, type=int,
|
||||
help="(Required) The number of workers that "
|
||||
"will be launched")
|
||||
parser.add_argument('--stdout', metavar='</file/path>',
|
||||
required=True,
|
||||
help="(Required) File path on Swift to output "
|
||||
"the simulation report to")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
options = {
|
||||
"trials": parsed_args.trials,
|
||||
"workers": parsed_args.workers,
|
||||
"stdout": parsed_args.stdout,
|
||||
"msg_limit_time": parsed_args.msg_limit_time,
|
||||
"noterm": parsed_args.noterm
|
||||
}
|
||||
|
||||
process = self.client.processes.create(
|
||||
self.gid, name="montecarlo",
|
||||
nova_flavor_id=parsed_args.flavor,
|
||||
glance_image_id=parsed_args.image,
|
||||
args=options)
|
||||
|
||||
process_args = process.args
|
||||
process_args.pop('gid')
|
||||
process_args.pop('pid')
|
||||
process_args.pop('ppid', None)
|
||||
process_args.pop('proxy_ip', None)
|
||||
process_args.pop('rackapi_ip', None)
|
||||
|
||||
cmd = process.name
|
||||
for k, v in sorted(process_args.items()):
|
||||
cmd += ' --' + k + ' ' + v
|
||||
|
||||
columns = ['pid', 'ppid', 'cmd']
|
||||
data = [process.pid, process.ppid, cmd]
|
||||
|
||||
return columns, data
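# Illustrative sketch (not part of the original module): a possible invocation,
# assuming this command is registered as 'montecarlo' in the rack CLI entry
# points (not shown in this file). IDs and the Swift path are placeholders.
#
#   rack --gid <gid> montecarlo --image <glance-image-id> --flavor <nova-flavor-id> \
#        --trials 1000000 --workers 4 --stdout /montecarlo/result.txt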
|
@ -1,179 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
from rackclient import exceptions
|
||||
|
||||
|
||||
def _make_print_data(network_id, name, neutron_network_id, is_admin,
|
||||
cidr, ext_router_id, gid, user_id, project_id,
|
||||
status=None):
|
||||
columns = ['network_id', 'name', 'neutron_network_id', 'is_admin',
|
||||
'cidr', 'ext_router_id', 'gid', 'user_id', 'project_id']
|
||||
data = [network_id, name, neutron_network_id, is_admin,
|
||||
cidr, ext_router_id, gid, user_id, project_id]
|
||||
|
||||
if status is not None:
|
||||
columns.append('status')
|
||||
data.append(status)
|
||||
|
||||
return columns, data
|
||||
|
||||
|
||||
class ListNetworks(Lister):
|
||||
"""
|
||||
Print a list of all networks in the specified group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ListNetworks, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
networks = self.client.networks.list(self.gid)
|
||||
return (
|
||||
('network_id', 'name', 'is_admin', 'status'),
|
||||
((n.network_id, n.name, n.is_admin, n.status) for n in networks)
|
||||
)
|
||||
|
||||
|
||||
class ShowNetwork(ShowOne):
|
||||
"""
|
||||
Show details about the given network.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ShowNetwork, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(ShowNetwork, self).get_parser(prog_name)
|
||||
parser.add_argument('network_id', metavar='<network-id>',
|
||||
help="Network ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
network = self.client.networks.get(self.gid,
|
||||
parsed_args.network_id)
|
||||
return _make_print_data(
|
||||
network.network_id,
|
||||
network.name,
|
||||
network.neutron_network_id,
|
||||
network.is_admin,
|
||||
network.cidr,
|
||||
network.ext_router_id,
|
||||
network.gid,
|
||||
network.user_id,
|
||||
network.project_id,
|
||||
network.status,
|
||||
)
|
||||
|
||||
|
||||
class CreateNetwork(ShowOne):
|
||||
"""
|
||||
Create a new network.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(CreateNetwork, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(CreateNetwork, self).get_parser(prog_name)
|
||||
parser.add_argument('cidr', metavar='<cidr>',
|
||||
help="Cidr of the new network")
|
||||
parser.add_argument('--name', metavar='<name>',
|
||||
help="Name of the new securitygroup")
|
||||
parser.add_argument('--is-admin', metavar='<true/false>',
|
||||
default=False,
|
||||
help="")
|
||||
parser.add_argument('--gateway-ip', metavar='<x.x.x.x>',
|
||||
help="Gateway ip address of the new network")
|
||||
parser.add_argument('--dns-nameserver', metavar='<x.x.x.x>',
|
||||
dest='dns_nameservers', action='append',
|
||||
help=("DNS server for the new network "
|
||||
"(Can be repeated)"))
|
||||
parser.add_argument('--ext-router-id', metavar='<router-id>',
|
||||
help="Router id the new network connects to")
|
||||
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
network = self.client.networks.create(self.gid,
|
||||
parsed_args.cidr,
|
||||
parsed_args.name,
|
||||
parsed_args.is_admin,
|
||||
parsed_args.gateway_ip,
|
||||
parsed_args.dns_nameservers,
|
||||
parsed_args.ext_router_id)
|
||||
return _make_print_data(
|
||||
network.network_id,
|
||||
network.name,
|
||||
network.neutron_network_id,
|
||||
network.is_admin,
|
||||
network.cidr,
|
||||
network.ext_router_id,
|
||||
network.gid,
|
||||
network.user_id,
|
||||
network.project_id
|
||||
)
|
||||
|
||||
|
||||
class DeleteNetwork(Command):
|
||||
"""
|
||||
Delete the specified network.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(DeleteNetwork, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(DeleteNetwork, self).get_parser(prog_name)
|
||||
parser.add_argument('network_id', metavar='<network-id>',
|
||||
help="Network ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
self.client.networks.delete(self.gid,
|
||||
parsed_args.network_id)
|
@ -1,251 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
from rackclient import exceptions
|
||||
from rackclient import utils
|
||||
|
||||
|
||||
class PS(Lister):
|
||||
"""
|
||||
Print a list of all processes in the specified group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(PS, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
processes = self.client.processes.list(self.gid)
|
||||
|
||||
def _make_command(process):
|
||||
p_args = process.args
|
||||
p_args.pop('gid')
|
||||
p_args.pop('pid')
|
||||
p_args.pop('ppid', None)
|
||||
p_args.pop('proxy_ip', None)
|
||||
p_args.pop('rackapi_ip', None)
|
||||
|
||||
cmd = process.name
|
||||
for k, v in sorted(p_args.items()):
|
||||
cmd += ' --' + k + ' ' + v
|
||||
|
||||
return cmd
|
||||
|
||||
return (
|
||||
('pid', 'ppid', 'command'),
|
||||
((p.pid, p.ppid, _make_command(p)) for p in processes)
|
||||
)
|
||||
|
||||
|
||||
def _make_print_data(pid, ppid, name, nova_instance_id, nova_flavor_id,
|
||||
glance_image_id, keypair_id, securitygroup_ids, networks,
|
||||
userdata, args, app_status, gid, user_id, project_id,
|
||||
status=None):
|
||||
columns = ['pid', 'ppid', 'name', 'nova_instance_id', 'nova_flavor_id',
|
||||
'glance_image_id', 'keypair_id', 'securitygroup_ids',
|
||||
'networks', 'userdata', 'args', 'app_status', 'gid', 'user_id',
|
||||
'project_id']
|
||||
data = [pid, ppid, name, nova_instance_id, nova_flavor_id,
|
||||
glance_image_id, keypair_id, securitygroup_ids, networks,
|
||||
userdata, args, app_status, gid, user_id, project_id]
|
||||
|
||||
if status is not None:
|
||||
columns.append('status')
|
||||
data.append(status)
|
||||
|
||||
return columns, data
|
||||
|
||||
|
||||
class Show(ShowOne):
|
||||
"""
|
||||
Show details about the given process.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(Show, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(Show, self).get_parser(prog_name)
|
||||
|
||||
parser.add_argument('pid', metavar='<pid>',
|
||||
help="process ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
process = self.client.processes.get(self.gid, parsed_args.pid)
|
||||
|
||||
sg_ids = process.securitygroup_ids
|
||||
if sg_ids:
|
||||
sg_ids = ','.join(sg_ids)
|
||||
|
||||
process_args = process.args
|
||||
if process_args:
|
||||
s = ''
|
||||
for k, v in sorted(process_args.items()):
|
||||
s += k + '=' + v + '\n'
|
||||
process_args = s.rstrip('\n')
|
||||
|
||||
return _make_print_data(
|
||||
process.pid,
|
||||
process.ppid,
|
||||
process.name,
|
||||
process.nova_instance_id,
|
||||
process.nova_flavor_id,
|
||||
process.glance_image_id,
|
||||
process.keypair_id,
|
||||
sg_ids,
|
||||
process.networks,
|
||||
process.userdata,
|
||||
process_args,
|
||||
process.app_status,
|
||||
process.gid,
|
||||
process.user_id,
|
||||
process.project_id,
|
||||
process.status
|
||||
)
|
||||
|
||||
|
||||
class Boot(ShowOne):
|
||||
"""
|
||||
Boot a process.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(Boot, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(Boot, self).get_parser(prog_name)
|
||||
|
||||
parser.add_argument('--ppid', metavar='<ppid>',
|
||||
help="ID of a parent process")
|
||||
parser.add_argument('--name', metavar='<name>',
|
||||
help="Name of the new process")
|
||||
parser.add_argument('--flavor', metavar='<flavor-id>',
|
||||
help="ID of a flavor that is provided by Nova")
|
||||
parser.add_argument('--image', metavar='<image-id>',
|
||||
help="ID of a image that is provided by Glance")
|
||||
parser.add_argument('--keypair', metavar='<keypair-id>',
|
||||
help="Keypair ID")
|
||||
parser.add_argument('--securitygroup', metavar='<securitygroup-id>',
|
||||
dest='securitygroup_ids', action='append',
|
||||
default=[],
|
||||
help="Securitygroup ID (Can be repeated)")
|
||||
parser.add_argument('--userdata', metavar='</file/path>',
|
||||
help="Userdata file path")
|
||||
parser.add_argument('--args', metavar='<key1=value1,key2=value2,...>',
|
||||
type=utils.keyvalue_to_dict,
|
||||
help=("Key-value pairs to be passed to "
|
||||
"metadata server"))
|
||||
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
userdata = None
|
||||
if parsed_args.userdata:
|
||||
try:
|
||||
userdata = open(parsed_args.userdata)
|
||||
except IOError:
|
||||
raise exceptions.CommandError(
|
||||
"Can't open '%s'" % parsed_args.userdata)
|
||||
|
||||
process = self.client.processes.create(
|
||||
gid=self.gid,
|
||||
ppid=parsed_args.ppid,
|
||||
name=parsed_args.name,
|
||||
nova_flavor_id=parsed_args.flavor,
|
||||
glance_image_id=parsed_args.image,
|
||||
keypair_id=parsed_args.keypair,
|
||||
securitygroup_ids=parsed_args.securitygroup_ids,
|
||||
userdata=userdata,
|
||||
args=parsed_args.args)
|
||||
|
||||
sg_ids = process.securitygroup_ids
|
||||
if sg_ids:
|
||||
sg_ids = ','.join(sg_ids)
|
||||
|
||||
process_args = process.args
|
||||
if process_args:
|
||||
s = ''
|
||||
for k, v in sorted(process_args.items()):
|
||||
s += k + '=' + v + '\n'
|
||||
process_args = s.rstrip('\n')
|
||||
|
||||
return _make_print_data(
|
||||
process.pid,
|
||||
process.ppid,
|
||||
process.name,
|
||||
process.nova_instance_id,
|
||||
process.nova_flavor_id,
|
||||
process.glance_image_id,
|
||||
process.keypair_id,
|
||||
sg_ids,
|
||||
process.networks,
|
||||
process.userdata,
|
||||
process_args,
|
||||
process.app_status,
|
||||
process.gid,
|
||||
process.user_id,
|
||||
process.project_id,
|
||||
)
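# Illustrative sketch (not part of the original module): how --args is presumably
# consumed. utils.keyvalue_to_dict (defined elsewhere) appears to turn
# 'key1=value1,key2=value2' into a dict, which Boot forwards as the process
# metadata, e.g.:
#
#   rack --gid <gid> boot --flavor <flavor-id> --image <image-id> \
#        --args role=worker,loglevel=debug
#
#   # -> processes.create(..., args={'role': 'worker', 'loglevel': 'debug'})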
|
||||
|
||||
|
||||
class Kill(Command):
|
||||
"""
|
||||
Delete the specified process.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(Kill, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(Kill, self).get_parser(prog_name)
|
||||
parser.add_argument('pid', metavar='<pid>',
|
||||
help="process ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
self.client.processes.delete(self.gid, parsed_args.pid)
|
@ -1,242 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
|
||||
|
||||
def _make_print_data(pid, ppid, name, nova_instance_id, nova_flavor_id,
|
||||
glance_image_id, keypair_id, securitygroup_ids, networks,
|
||||
userdata, args, app_status, fs_endpoint, ipc_endpoint,
|
||||
shm_endpoint, gid, user_id, project_id, status=None):
|
||||
columns = ['pid', 'ppid', 'name', 'nova_instance_id', 'nova_flavor_id',
|
||||
'glance_image_id', 'keypair_id', 'securitygroup_ids',
|
||||
'networks', 'userdata', 'args', 'app_status', 'fs_endpoint',
|
||||
'ipc_endpoint', 'shm_endpoint', 'gid', 'user_id', 'project_id']
|
||||
data = [pid, ppid, name, nova_instance_id, nova_flavor_id,
|
||||
glance_image_id, keypair_id, securitygroup_ids, networks,
|
||||
userdata, args, app_status, fs_endpoint, ipc_endpoint,
|
||||
shm_endpoint, gid, user_id, project_id]
|
||||
|
||||
if status is not None:
|
||||
columns.append('status')
|
||||
data.append(status)
|
||||
|
||||
return columns, data
|
||||
|
||||
|
||||
class ShowProxy(ShowOne):
|
||||
"""
|
||||
Show details about the rack-proxy process.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ShowProxy, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
proxy = self.client.proxy.get(self.gid)
|
||||
|
||||
sg_ids = proxy.securitygroup_ids
|
||||
if sg_ids:
|
||||
sg_ids = ','.join(sg_ids)
|
||||
|
||||
proxy_args = proxy.args
|
||||
if proxy_args:
|
||||
s = ''
|
||||
for k, v in sorted(proxy_args.items()):
|
||||
s += k + '=' + v + '\n'
|
||||
proxy_args = s.rstrip('\n')
|
||||
|
||||
return _make_print_data(
|
||||
proxy.pid,
|
||||
proxy.ppid,
|
||||
proxy.name,
|
||||
proxy.nova_instance_id,
|
||||
proxy.nova_flavor_id,
|
||||
proxy.glance_image_id,
|
||||
proxy.keypair_id,
|
||||
sg_ids,
|
||||
proxy.networks,
|
||||
proxy.userdata,
|
||||
proxy_args,
|
||||
proxy.app_status,
|
||||
proxy.fs_endpoint,
|
||||
proxy.ipc_endpoint,
|
||||
proxy.shm_endpoint,
|
||||
proxy.gid,
|
||||
proxy.user_id,
|
||||
proxy.project_id,
|
||||
proxy.status
|
||||
)
|
||||
|
||||
|
||||
class CreateProxy(ShowOne):
|
||||
"""
|
||||
Create a rack-proxy process.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(CreateProxy, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(CreateProxy, self).get_parser(prog_name)
|
||||
|
||||
parser.add_argument('--name', metavar='<name>',
|
||||
help="Name of the rack-proxy process")
|
||||
parser.add_argument('--keypair', metavar='<keypair-id>',
|
||||
help="Keypair id of the new process uses")
|
||||
parser.add_argument('--securitygroup', metavar='<securitygroup-id>',
|
||||
dest='securitygroup', action='append',
|
||||
default=[],
|
||||
help=("Securitygroup id the rack-proxy process "
|
||||
"belongs to (Can be repeated)"))
|
||||
parser.add_argument('--flavor', metavar='<nova-flavor-id>',
|
||||
required=True,
|
||||
help=("(Required) Flavor id of "
|
||||
"the rack-proxy process"))
|
||||
parser.add_argument('--image', metavar='<glance-image-id>',
|
||||
required=True,
|
||||
help=("(Required) Image id that registered "
|
||||
"on Glance of the rack-proxy process"))
|
||||
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
proxy = self.client.proxy.create(
|
||||
self.gid,
|
||||
name=parsed_args.name,
nova_flavor_id=parsed_args.flavor,
glance_image_id=parsed_args.image,
keypair_id=parsed_args.keypair,
securitygroup_ids=parsed_args.securitygroup)
|
||||
|
||||
sg_ids = proxy.securitygroup_ids
|
||||
if sg_ids:
|
||||
sg_ids = ','.join(sg_ids)
|
||||
|
||||
proxy_args = proxy.args
|
||||
if proxy_args:
|
||||
s = ''
|
||||
for k, v in sorted(proxy_args.items()):
|
||||
s += k + '=' + v + '\n'
|
||||
proxy_args = s.rstrip('\n')
|
||||
|
||||
return _make_print_data(
|
||||
proxy.pid,
|
||||
proxy.ppid,
|
||||
proxy.name,
|
||||
proxy.nova_instance_id,
|
||||
proxy.nova_flavor_id,
|
||||
proxy.glance_image_id,
|
||||
proxy.keypair_id,
|
||||
sg_ids,
|
||||
proxy.networks,
|
||||
proxy.userdata,
|
||||
proxy_args,
|
||||
proxy.app_status,
|
||||
proxy.fs_endpoint,
|
||||
proxy.ipc_endpoint,
|
||||
proxy.shm_endpoint,
|
||||
proxy.gid,
|
||||
proxy.user_id,
|
||||
proxy.project_id,
|
||||
proxy.status
|
||||
)
|
||||
|
||||
|
||||
class UpdateProxy(ShowOne):
|
||||
"""
|
||||
Update the rack-proxy process.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(UpdateProxy, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(UpdateProxy, self).get_parser(prog_name)
|
||||
|
||||
parser.add_argument('--fs-endpoint', metavar='<fs-endpoint>',
|
||||
help="Endpoint of the shared memory service")
|
||||
parser.add_argument('--ipc-endpoint', metavar='<ipc-endpoint>',
|
||||
help="Endpoint of the IPC service")
|
||||
parser.add_argument('--shm-endpoint', metavar='<shm-endpoint>',
|
||||
help="Endpoint of the file system service")
|
||||
parser.add_argument('--app-status', metavar='<app-status>',
|
||||
help="Application layer status of the proxy")
|
||||
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
proxy = self.client.proxy.update(self.gid,
|
||||
parsed_args.fs_endpoint,
|
||||
parsed_args.ipc_endpoint,
|
||||
parsed_args.shm_endpoint,
|
||||
parsed_args.app_status)
|
||||
|
||||
sg_ids = proxy.securitygroup_ids
|
||||
if sg_ids:
|
||||
sg_ids = ','.join(sg_ids)
|
||||
|
||||
proxy_args = proxy.args
|
||||
if proxy_args:
|
||||
s = ''
|
||||
for k, v in sorted(proxy_args.items()):
|
||||
s += k + '=' + v + '\n'
|
||||
proxy_args = s.rstrip('\n')
|
||||
|
||||
return _make_print_data(
|
||||
proxy.pid,
|
||||
proxy.ppid,
|
||||
proxy.name,
|
||||
proxy.nova_instance_id,
|
||||
proxy.nova_flavor_id,
|
||||
proxy.glance_image_id,
|
||||
proxy.keypair_id,
|
||||
sg_ids,
|
||||
proxy.networks,
|
||||
proxy.userdata,
|
||||
proxy_args,
|
||||
proxy.app_status,
|
||||
proxy.fs_endpoint,
|
||||
proxy.ipc_endpoint,
|
||||
proxy.shm_endpoint,
|
||||
proxy.gid,
|
||||
proxy.user_id,
|
||||
proxy.project_id
|
||||
)
|
@ -1,228 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
|
||||
from cliff.command import Command
|
||||
from cliff.lister import Lister
|
||||
from cliff.show import ShowOne
|
||||
|
||||
from rackclient import client
|
||||
from rackclient import exceptions
|
||||
from rackclient import utils
|
||||
|
||||
|
||||
def _make_print_data(securitygroup_id, name, neutron_securitygroup_id,
|
||||
is_default, gid, user_id, project_id, status=None):
|
||||
columns = ['securitygroup_id', 'name', 'neutron_securitygroup_id',
|
||||
'is_default', 'gid', 'user_id', 'project_id']
|
||||
data = [securitygroup_id, name, neutron_securitygroup_id,
|
||||
is_default, gid, user_id, project_id]
|
||||
|
||||
if status is not None:
|
||||
columns.append('status')
|
||||
data.append(status)
|
||||
|
||||
return columns, data
|
||||
|
||||
|
||||
class ListSecuritygroups(Lister):
|
||||
"""
|
||||
Print a list of all security groups in the specified group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ListSecuritygroups, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
securitygroups = self.client.securitygroups.list(self.gid)
|
||||
return (
|
||||
('securitygroup_id', 'name', 'is_default', 'status'),
|
||||
((s.securitygroup_id, s.name, s.is_default, s.status)
|
||||
for s in securitygroups)
|
||||
)
|
||||
|
||||
|
||||
class ShowSecuritygroup(ShowOne):
|
||||
"""
|
||||
Show details about the given security group.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(ShowSecuritygroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(ShowSecuritygroup, self).get_parser(prog_name)
|
||||
parser.add_argument('securitygroup_id', metavar='<securitygroup-id>',
|
||||
help="Securitygroup ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
securitygroup = self.client.securitygroups.get(
|
||||
self.gid,
|
||||
parsed_args.securitygroup_id)
|
||||
return _make_print_data(
|
||||
securitygroup.securitygroup_id,
|
||||
securitygroup.name,
|
||||
securitygroup.neutron_securitygroup_id,
|
||||
securitygroup.is_default,
|
||||
securitygroup.gid,
|
||||
securitygroup.user_id,
|
||||
securitygroup.project_id,
|
||||
securitygroup.status,
|
||||
)
|
||||
|
||||
|
||||
class CreateSecuritygroup(ShowOne):
|
||||
"""
|
||||
Create a new securitygroup.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(CreateSecuritygroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(CreateSecuritygroup, self).get_parser(prog_name)
|
||||
parser.add_argument('--name', metavar='<name>',
|
||||
help="Name of the new securitygroup")
|
||||
parser.add_argument('--is-default', metavar='<true/false>',
|
||||
default=False,
|
||||
help=("Defaults to the default securitygroup "
|
||||
"of the group"))
|
||||
parser.add_argument('--rule',
|
||||
metavar=("<protocol=tcp|udp|icmp,"
|
||||
"port_range_max=integer,"
|
||||
"port_range_min=integer,"
|
||||
"remote_ip_prefix=cidr,"
|
||||
"remote_securitygroup_id="
|
||||
"securitygroup_uuid>"),
|
||||
action='append',
|
||||
type=utils.keyvalue_to_dict,
|
||||
dest='rules',
|
||||
default=[],
|
||||
help=("Securitygroup rules. "
|
||||
"protocol: Protocol of packet, "
|
||||
"port_range_max: Starting port range, "
|
||||
"port_range_min: Ending port range, "
|
||||
"remote_ip_prefix: CIDR to match on, "
|
||||
"remote_securitygroup_id: "
|
||||
"Remote securitygroup id "
|
||||
"to apply rule. (Can be repeated)"))
|
||||
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
securitygroup = self.client.securitygroups.create(
|
||||
self.gid,
|
||||
parsed_args.name,
|
||||
parsed_args.is_default,
|
||||
parsed_args.rules)
|
||||
|
||||
return _make_print_data(
|
||||
securitygroup.securitygroup_id,
|
||||
securitygroup.name,
|
||||
securitygroup.neutron_securitygroup_id,
|
||||
securitygroup.is_default,
|
||||
securitygroup.gid,
|
||||
securitygroup.user_id,
|
||||
securitygroup.project_id,
|
||||
)
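# Illustrative sketch (not part of the original module): two example --rule
# values in the key=value format described by the metavar above; the CIDR and
# securitygroup UUID are placeholders.
#
#   --rule protocol=tcp,port_range_min=22,port_range_max=22,remote_ip_prefix=0.0.0.0/0
#   --rule protocol=icmp,remote_securitygroup_id=<securitygroup-uuid>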
|
||||
|
||||
|
||||
class UpdateSecuritygroup(ShowOne):
|
||||
"""
|
||||
Update the specified securitygroup.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(UpdateSecuritygroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(UpdateSecuritygroup, self).get_parser(prog_name)
|
||||
parser.add_argument('securitygroup_id', metavar='<securitygroup-id>',
|
||||
help="Securitygroup ID")
|
||||
parser.add_argument('--is-default', metavar='<true/false>',
|
||||
required=True,
|
||||
help=("Defaults to the default securitygroup "
|
||||
"of the group"))
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
securitygroup = self.client.securitygroups.update(
|
||||
self.gid,
|
||||
parsed_args.securitygroup_id,
|
||||
parsed_args.is_default)
|
||||
return _make_print_data(
|
||||
securitygroup.securitygroup_id,
|
||||
securitygroup.name,
|
||||
securitygroup.neutron_securitygroup_id,
|
||||
securitygroup.is_default,
|
||||
securitygroup.gid,
|
||||
securitygroup.user_id,
|
||||
securitygroup.project_id,
|
||||
)
|
||||
|
||||
|
||||
class DeleteSecuritygroup(Command):
|
||||
"""
|
||||
Delete the specified securitygroup.
|
||||
"""
|
||||
def __init__(self, app, app_args):
|
||||
super(DeleteSecuritygroup, self).__init__(app, app_args)
|
||||
|
||||
# When the help command is called,
|
||||
# the type of 'app_args' is list.
|
||||
if isinstance(app_args, argparse.Namespace):
|
||||
self.client = client.Client(app_args.rack_api_version,
|
||||
rack_url=app_args.rack_url,
|
||||
http_log_debug=app_args.debug)
|
||||
self.gid = app_args.gid
|
||||
|
||||
def get_parser(self, prog_name):
|
||||
parser = super(DeleteSecuritygroup, self).get_parser(prog_name)
|
||||
parser.add_argument('securitygroup_id', metavar='<securitygroup-id>',
|
||||
help="Securitygroup ID")
|
||||
return parser
|
||||
|
||||
def take_action(self, parsed_args):
|
||||
self.client.securitygroups.delete(
|
||||
self.gid,
|
||||
parsed_args.securitygroup_id)
|
@ -1,79 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.v1 import base
|
||||
|
||||
|
||||
class Group(base.Resource):
|
||||
|
||||
def __repr__(self):
|
||||
return "<Group: %s>" % self.name
|
||||
|
||||
|
||||
class GroupManager(base.Manager):
|
||||
|
||||
resource_class = Group
|
||||
|
||||
def list(self):
|
||||
"""
|
||||
Get a list of all groups.
|
||||
|
||||
:rtype: list of Group.
|
||||
"""
|
||||
return self._list("/groups", "groups")
|
||||
|
||||
def get(self, gid):
|
||||
"""
|
||||
Get a group.
|
||||
|
||||
:param gid: ID of group to get.
|
||||
:rtype: Group.
|
||||
"""
|
||||
return self._get("/groups/%s" % gid, "group")
|
||||
|
||||
def _build_body(self, name, description=None):
|
||||
return {
|
||||
"group": {
|
||||
"name": name,
|
||||
"description": description if description else None
|
||||
}
|
||||
}
|
||||
|
||||
def create(self, name, description=None):
|
||||
"""
|
||||
Create a group.
|
||||
|
||||
:param name: Name of the group.
|
||||
:param description: Description of the group.
|
||||
"""
|
||||
body = self._build_body(name, description)
|
||||
return self._create("/groups", body, "group")
|
||||
|
||||
def update(self, gid, name, description=None):
|
||||
"""
|
||||
Update the name or the description of the group.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param name: Name of the group to update.
|
||||
:param description: Description of the group to update.
|
||||
"""
|
||||
body = self._build_body(name, description)
|
||||
return self._update("/groups/%s" % gid, body, "group")
|
||||
|
||||
def delete(self, gid):
|
||||
"""
|
||||
Delete a group.
|
||||
|
||||
:param gid: ID of the group to delete.
|
||||
"""
|
||||
self._delete("/groups/%s" % gid)
|
@ -1,98 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions
|
||||
from rackclient.openstack.common import strutils
|
||||
from rackclient.v1 import base
|
||||
|
||||
|
||||
class Keypair(base.Resource):
|
||||
|
||||
def __repr__(self):
|
||||
return "<Keypair: %s>" % self.name
|
||||
|
||||
|
||||
class KeypairManager(base.Manager):
|
||||
|
||||
resource_class = Keypair
|
||||
|
||||
def list(self, gid):
|
||||
"""
|
||||
Get a list of all keypairs in the specified group.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:rtype: list of Keypair.
|
||||
"""
|
||||
return self._list("/groups/%s/keypairs" % gid, "keypairs")
|
||||
|
||||
def get(self, gid, keypair_id):
|
||||
"""
|
||||
Get a keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param keypair_id: ID of the keypair to get.
|
||||
:rtype: Keypair.
|
||||
"""
|
||||
return self._get("/groups/%s/keypairs/%s" %
|
||||
(gid, keypair_id), "keypair")
|
||||
|
||||
def create(self, gid, name=None, is_default=False):
|
||||
"""
|
||||
Create a keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param name: Name of the keypair.
|
||||
:param is_default: Set to the default keypair of the group.
|
||||
"""
|
||||
try:
|
||||
is_default = strutils.bool_from_string(is_default, True)
|
||||
except Exception:
|
||||
raise exceptions.CommandError("is_default must be a boolean.")
|
||||
|
||||
body = {
|
||||
"keypair": {
|
||||
"name": name,
|
||||
"is_default": is_default
|
||||
}
|
||||
}
|
||||
return self._create("/groups/%s/keypairs" % gid, body, "keypair")
|
||||
|
||||
def update(self, gid, keypair_id, is_default):
|
||||
"""
|
||||
Update the status of keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param keypair_id: ID of the keypair to update.
|
||||
:param is_default: Set to the default keypair of the group.
|
||||
"""
|
||||
try:
|
||||
is_default = strutils.bool_from_string(is_default, True)
|
||||
except Exception:
|
||||
raise exceptions.CommandError("is_default must be a boolean.")
|
||||
|
||||
body = {
|
||||
"keypair": {
|
||||
"is_default": is_default
|
||||
}
|
||||
}
|
||||
return self._update("/groups/%s/keypairs/%s" %
|
||||
(gid, keypair_id), body, "keypair")
|
||||
|
||||
def delete(self, gid, keypair_id):
|
||||
"""
|
||||
Delete a keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param keypair_id: ID of the keypair to delete.
|
||||
"""
|
||||
self._delete("/groups/%s/keypairs/%s" % (gid, keypair_id))
|
@ -1,111 +0,0 @@
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import netaddr
|
||||
from rackclient import exceptions
|
||||
from rackclient.openstack.common import strutils
|
||||
from rackclient.v1 import base
|
||||
|
||||
|
||||
class Network(base.Resource):
|
||||
|
||||
def __repr__(self):
|
||||
return "<Network: %s>" % self.name
|
||||
|
||||
|
||||
class NetworkManager(base.Manager):
|
||||
|
||||
resource_class = Network
|
||||
|
||||
def list(self, gid):
|
||||
"""
|
||||
Get a list of all networks in the specified group.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:rtype: list of Network.
|
||||
"""
|
||||
return self._list("/groups/%s/networks" % gid, "networks")
|
||||
|
||||
def get(self, gid, network_id):
|
||||
"""
|
||||
Get a network.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param network_id: ID of the network to get.
|
||||
:rtype: Network.
|
||||
"""
|
||||
return self._get("/groups/%s/networks/%s" %
|
||||
(gid, network_id), "network")
|
||||
|
||||
def create(self, gid, cidr, name=None, is_admin=False,
|
||||
gateway=None, dns_nameservers=None, ext_router_id=None):
|
||||
"""
|
||||
Create a network.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param cidr: CIDR of the new network.
|
||||
:param name: Name of the new network.
|
||||
:param is_admin: Whether to mark the network as an admin network.
|
||||
:param gateway: Gateway ip address of the new network.
|
||||
:param list dns_nameservers: List of DNS servers for the new network.
|
||||
:param ext_router_id: Router ID the new network connects to.
|
||||
"""
|
||||
def _is_valid_cidr(address):
|
||||
try:
|
||||
netaddr.IPNetwork(address)
|
||||
except netaddr.AddrFormatError:
|
||||
return False
|
||||
|
||||
ip_segment = address.split('/')
|
||||
if (len(ip_segment) <= 1 or
|
||||
ip_segment[1] == ''):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
if not _is_valid_cidr(cidr):
|
||||
raise exceptions.CommandError("cidr must be a CIDR.")
|
||||
|
||||
if is_admin:
|
||||
try:
|
||||
is_admin = strutils.bool_from_string(is_admin, True)
|
||||
except Exception:
|
||||
raise exceptions.CommandError("is_admin must be a boolean.")
|
||||
|
||||
if gateway and not netaddr.valid_ipv4(gateway):
|
||||
raise exceptions.CommandError("gateway must be a IP address")
|
||||
|
||||
if dns_nameservers is not None and not isinstance(
|
||||
dns_nameservers, list):
|
||||
raise exceptions.CommandError("dns_nameservers must be a list")
|
||||
|
||||
body = {
|
||||
"network": {
|
||||
"cidr": cidr,
|
||||
"name": name,
|
||||
"is_admin": is_admin,
|
||||
"gateway": gateway,
|
||||
"dns_nameservers": dns_nameservers,
|
||||
"ext_router_id": ext_router_id
|
||||
}
|
||||
}
|
||||
return self._create("/groups/%s/networks" % gid, body, "network")
|
||||
|
||||
def delete(self, gid, network_id):
|
||||
"""
|
||||
Delete a network.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param network_id: ID of the network to delete.
|
||||
"""
|
||||
self._delete("/groups/%s/networks/%s" % (gid, network_id))
|
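A brief usage sketch of the NetworkManager removed above (not part of the deleted file). It assumes a rackclient v1 client exposing the manager as `client.networks` (an assumption); create() rejects a CIDR without an explicit prefix length, a non-IPv4 gateway, and a non-list dns_nameservers before any request is sent.

# Hypothetical usage of NetworkManager.create(); all values are illustrative.
def create_private_network(client, gid):
    # Client-side validation: cidr needs a prefix length, gateway must be
    # a valid IPv4 address, dns_nameservers must be a list.
    return client.networks.create(
        gid,
        cidr="10.0.0.0/24",
        name="private",
        gateway="10.0.0.1",
        dns_nameservers=["8.8.8.8", "8.8.4.4"],
        ext_router_id=None)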
@ -1,119 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from rackclient import exceptions
from rackclient.v1 import base


class Process(base.Resource):

    def __repr__(self):
        return "<Process: %s>" % self.name


class ProcessManager(base.Manager):

    resource_class = Process

    def list(self, gid):
        """
        Get a list of all processes in the specified group.

        :param gid: ID of the group.
        :rtype: list of Process.
        """
        return self._list("/groups/%s/processes" % gid, "processes")

    def get(self, gid, pid):
        """
        Get a process.

        :param gid: ID of the group.
        :param pid: ID of the process to get.
        :rtype: Process.
        """
        return self._get("/groups/%s/processes/%s" % (gid, pid), "process")

    def create(self, gid, ppid=None, **kwargs):
        """
        Create a process.

        If you give a ppid (parent process ID),
        all other parameters will be inherited by the child process,
        but you can override them.

        Parameters in kwargs:

        :param name: Name of the new process
        :param nova_flavor_id: ID of a flavor
        :param glance_image_id: ID of a glance image
        :param keypair_id: ID of a keypair
        :param list securitygroup_ids: List of IDs of securitygroups
        :param userdata: File-like object or string containing the script
        :param dict args: Dict of key-value pairs to be stored as metadata
        """

        securitygroup_ids = kwargs.get('securitygroup_ids')
        if securitygroup_ids is not None and not isinstance(
                securitygroup_ids, list):
            raise exceptions.CommandError("securitygroup_ids must be a list")

        userdata = kwargs.get('userdata')
        if userdata:
            if hasattr(userdata, 'read'):
                userdata = userdata.read()
            # The API expects userdata as a base64-encoded string.
            userdata_b64 = base64.b64encode(userdata)

        args = kwargs.get('args')
        if args is not None and not isinstance(args, dict):
            raise exceptions.CommandError("args must be a dict")

        body = {
            "process": {
                "ppid": ppid,
                "name": kwargs.get('name'),
                "nova_flavor_id": kwargs.get('nova_flavor_id'),
                "glance_image_id": kwargs.get('glance_image_id'),
                "keypair_id": kwargs.get('keypair_id'),
                "securitygroup_ids": securitygroup_ids,
                "userdata": userdata_b64 if userdata else userdata,
                "args": args
            }
        }
        return self._create("/groups/%s/processes" % gid, body, "process")

    def update(self, gid, pid, app_status):
        """
        Update the status of a process.

        :param gid: ID of the group.
        :param pid: ID of the process.
        :param app_status: Application layer status of the process.
        """
        body = {
            "process": {
                "app_status": app_status
            }
        }
        return self._update("/groups/%s/processes/%s" %
                            (gid, pid), body, "process")

    def delete(self, gid, pid):
        """
        Delete a process.

        :param gid: ID of the group.
        :param pid: ID of the process to delete.
        """
        self._delete("/groups/%s/processes/%s" % (gid, pid))
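A brief usage sketch of the ProcessManager removed above (not part of the deleted file). It assumes a rackclient v1 client exposing the manager as `client.processes` (an assumption); when ppid is given, the RACK API inherits the parent's parameters for the child unless they are overridden in the call, as the docstring above states.

# Hypothetical usage of ProcessManager.create(); names and values are illustrative.
def boot_child_process(client, gid, parent_pid, script_path):
    # File-like userdata is read and base64-encoded by create();
    # args is stored as metadata on the new process.
    with open(script_path) as userdata:
        return client.processes.create(
            gid,
            ppid=parent_pid,
            name="worker",
            userdata=userdata,
            args={"role": "worker"})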
@ -1,100 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from rackclient import exceptions
from rackclient.v1 import base


class Proxy(base.Resource):

    def __repr__(self):
        return "<Proxy: %s>" % self.name


class ProxyManager(base.Manager):

    resource_class = Proxy

    def get(self, gid):
        """
        Get information about the rack-proxy process.

        :param gid: ID of the group.
        :rtype: Proxy.
        """
        return self._get("/groups/%s/proxy" % gid, "proxy")

    def create(self, gid, name=None, nova_flavor_id=None, glance_image_id=None,
               keypair_id=None, securitygroup_ids=None, userdata=None,
               args=None):
        """
        Create a rack-proxy process.

        :param gid: ID of a group
        :param name: Name of the rack-proxy process
        :param nova_flavor_id: ID of a flavor
        :param glance_image_id: ID of a glance image
        :param keypair_id: ID of a keypair
        :param securitygroup_ids: List of IDs of securitygroups
        :param userdata: File-like object or string containing the script
        :param dict args: Dict of key-value pairs to be stored as metadata
        """

        if securitygroup_ids is not None and not isinstance(
                securitygroup_ids, list):
            raise exceptions.CommandError("securitygroup_ids must be a list")

        if userdata:
            if hasattr(userdata, 'read'):
                userdata = userdata.read()
            # The API expects userdata as a base64-encoded string.
            userdata_b64 = base64.b64encode(userdata)

        if args is not None and not isinstance(args, dict):
            raise exceptions.CommandError("args must be a dict")

        body = {
            "proxy": {
                "name": name,
                "nova_flavor_id": nova_flavor_id,
                "glance_image_id": glance_image_id,
                "keypair_id": keypair_id,
                "securitygroup_ids": securitygroup_ids,
                "userdata": userdata_b64 if userdata else userdata,
                "args": args
            }
        }
        return self._create("/groups/%s/proxy" % gid, body, "proxy")

    def update(self, gid, shm_endpoint=None, ipc_endpoint=None,
               fs_endpoint=None, app_status=None):
        """
        Update parameters of a rack-proxy process.

        :param gid: ID of a group
        :param shm_endpoint: An endpoint of shared memory.
                             Arbitrary string value.
        :param ipc_endpoint: An endpoint of IPC. Arbitrary string value.
        :param fs_endpoint: An endpoint of the file system. Arbitrary string value.
        :param app_status: Application layer status of a rack-proxy process.
        """

        body = {
            "proxy": {
                "shm_endpoint": shm_endpoint,
                "ipc_endpoint": ipc_endpoint,
                "fs_endpoint": fs_endpoint,
                "app_status": app_status
            }
        }
        return self._update("/groups/%s/proxy" % gid, body, "proxy")
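A brief usage sketch of the ProxyManager removed above (not part of the deleted file). It assumes a rackclient v1 client exposing the manager as `client.proxy` (an assumption); the endpoint values are arbitrary strings interpreted by RACK itself, and the URLs below are purely illustrative.

# Hypothetical usage of ProxyManager.update(); endpoint strings are made up.
def register_proxy_endpoints(client, gid):
    # update() always sends all four keys; parameters left at None go out as null.
    return client.proxy.update(
        gid,
        shm_endpoint="redis://10.0.0.5:6379",
        ipc_endpoint="amqp://10.0.0.5:5672",
        fs_endpoint="swift://rack-container",
        app_status=None)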
@ -1,107 +0,0 @@
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rackclient import exceptions
from rackclient.openstack.common import strutils
from rackclient.v1 import base


class Securitygroup(base.Resource):

    def __repr__(self):
        return "<Securitygroup: %s>" % self.name


class SecuritygroupManager(base.Manager):

    resource_class = Securitygroup

    def list(self, gid):
        """
        Get a list of all securitygroups in the specified group.

        :param gid: ID of the group.
        :rtype: list of Securitygroup.
        """
        return self._list("/groups/%s/securitygroups" % gid, "securitygroups")

    def get(self, gid, securitygroup_id):
        """
        Get a securitygroup.

        :param gid: ID of the group.
        :param securitygroup_id: ID of the securitygroup to get.
        :rtype: Securitygroup.
        """
        return self._get("/groups/%s/securitygroups/%s" %
                         (gid, securitygroup_id), "securitygroup")

    def create(self, gid, name=None, is_default=False,
               securitygroup_rules=None):
        """
        Create a securitygroup.

        :param gid: ID of the group.
        :param name: Name of the securitygroup.
        :param is_default: Whether to set the securitygroup as the group's default.
        :param list securitygroup_rules: List of rules of the securitygroup.
        """
        try:
            is_default = strutils.bool_from_string(is_default, True)
        except Exception:
            raise exceptions.CommandError("is_default must be a boolean.")

        if securitygroup_rules is not None:
            if not isinstance(securitygroup_rules, list):
                raise exceptions.CommandError(
                    "securitygroup_rules must be a list")

        body = {
            "securitygroup": {
                "name": name,
                "is_default": is_default,
                "securitygrouprules": securitygroup_rules
            }
        }
        return self._create("/groups/%s/securitygroups" %
                            gid, body, "securitygroup")

    def update(self, gid, securitygroup_id, is_default=False):
        """
        Update the status of a securitygroup.

        :param gid: ID of the group.
        :param securitygroup_id: ID of the securitygroup to update.
        :param is_default: Whether to set the securitygroup as the group's default.
        """
        try:
            is_default = strutils.bool_from_string(is_default, True)
        except Exception:
            raise exceptions.CommandError("is_default must be a boolean.")

        body = {
            "securitygroup": {
                "is_default": is_default,
            }
        }
        return self._update("/groups/%s/securitygroups/%s" %
                            (gid, securitygroup_id), body, "securitygroup")

    def delete(self, gid, securitygroup_id):
        """
        Delete a securitygroup.

        :param gid: ID of the group.
        :param securitygroup_id: ID of the securitygroup to delete.
        """
        self._delete("/groups/%s/securitygroups/%s" % (gid, securitygroup_id))
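A brief usage sketch of the SecuritygroupManager removed above (not part of the deleted file). It assumes a rackclient v1 client exposing the manager as `client.securitygroups` (an assumption); the rule dictionary keys below are illustrative only, since the expected rule format is defined by the RACK API rather than by this module.

# Hypothetical usage of SecuritygroupManager.create(); rule keys are illustrative.
def create_ssh_securitygroup(client, gid):
    # securitygroup_rules must be a list, or create() raises CommandError.
    rules = [{"protocol": "tcp", "port_range_min": 22, "port_range_max": 22}]
    return client.securitygroups.create(
        gid, name="ssh-only", is_default=True, securitygroup_rules=rules)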
@ -1,13 +0,0 @@
pbr>=0.10.8
requests>=1.1.0
redis>=2.10.3
Babel>=1.3
netaddr>=0.7.12
six>=1.6.1
python-swiftclient>=2.2.0
oslo.utils>=0.2.0
PrettyTable>=0.7,<0.8
websocket-client>=0.16.0
python-keystoneclient>=0.11.2
pika>=0.9.14
cliff>=1.9.0
59
setup.cfg
@ -1,59 +0,0 @@
[metadata]
name = python-rackclient
summary = Client library for RACK API
description-file =
    README.md
license = Apache License, Version 2.0
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = https://github.com/stackforge/python-rackclient
classifier =
    Environment :: Console
    Environment :: OpenStack
    Intended Audience :: Developers
    Intended Audience :: Information Technology
    License :: OSI Approved :: Apache Software License
    Operating System :: OS Independent
    Programming Language :: Python
    Programming Language :: Python :: 2.6
    Programming Language :: Python :: 2.7

[files]
packages =
    rackclient

[entry_points]
console_scripts =
    rack = rackclient.shell:main
rack.command =
    group-list = rackclient.v1.command.groups:ListGroups
    group-show = rackclient.v1.command.groups:ShowGroup
    group-create = rackclient.v1.command.groups:CreateGroup
    group-update = rackclient.v1.command.groups:UpdateGroup
    group-delete = rackclient.v1.command.groups:DeleteGroup
    group-init = rackclient.v1.command.groups:InitGroup
    keypair-list = rackclient.v1.command.keypairs:ListKeypairs
    keypair-show = rackclient.v1.command.keypairs:ShowKeypair
    keypair-create = rackclient.v1.command.keypairs:CreateKeypair
    keypair-update = rackclient.v1.command.keypairs:UpdateKeypair
    keypair-delete = rackclient.v1.command.keypairs:DeleteKeypair
    securitygroup-list = rackclient.v1.command.securitygroups:ListSecuritygroups
    securitygroup-show = rackclient.v1.command.securitygroups:ShowSecuritygroup
    securitygroup-create = rackclient.v1.command.securitygroups:CreateSecuritygroup
    securitygroup-update = rackclient.v1.command.securitygroups:UpdateSecuritygroup
    securitygroup-delete = rackclient.v1.command.securitygroups:DeleteSecuritygroup
    network-list = rackclient.v1.command.networks:ListNetworks
    network-show = rackclient.v1.command.networks:ShowNetwork
    network-create = rackclient.v1.command.networks:CreateNetwork
    network-delete = rackclient.v1.command.networks:DeleteNetwork
    proxy-show = rackclient.v1.command.proxy:ShowProxy
    proxy-create = rackclient.v1.command.proxy:CreateProxy
    proxy-update = rackclient.v1.command.proxy:UpdateProxy
    ps = rackclient.v1.command.processes:PS
    show = rackclient.v1.command.processes:Show
    boot = rackclient.v1.command.processes:Boot
    kill = rackclient.v1.command.processes:Kill

    # applications
    montecarlo = rackclient.v1.command.montecarlo:Montecarlo
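The console_scripts and rack.command sections above are standard setuptools entry points: `rack` becomes the CLI executable, and each rack.command entry maps a subcommand name to its implementing class (the project depends on cliff, which loads commands from such a namespace). A small sketch of how those entries can be enumerated once the package is installed; pkg_resources ships with setuptools, and the loop below is illustrative rather than part of the project:

import pkg_resources

# List every subcommand registered under the "rack.command" namespace,
# e.g. "group-list -> rackclient.v1.command.groups".
for entry_point in pkg_resources.iter_entry_points("rack.command"):
    print("%s -> %s" % (entry_point.name, entry_point.module_name))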
5
setup.py
@ -1,5 +0,0 @@
from setuptools import setup

setup(
    setup_requires=['pbr'],
    pbr=True)
@ -1,6 +0,0 @@
coverage
discover
fixtures
testrepository
testtools
mock
32
tox.ini
@ -1,32 +0,0 @@
[tox]
envlist = py26,py27,py33,pep8
minversion = 1.6
skipsdist = True

[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}

deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands =
    /usr/bin/find . -type f -name "*.pyc" -delete
    python setup.py testr --testr-args='{posargs}'

[testenv:pep8]
commands = flake8 {posargs}

[testenv:venv]
commands = {posargs}

[testenv:cover]
commands = python setup.py testr --coverage --testr-args='{posargs}'

[tox:jenkins]
downloadcache = ~/cache/pip

[flake8]
ignore =
show-source = True
exclude = .venv,.git,.tox,dist,*openstack/common*,*lib/python*,*egg,build,doc/source/conf.py