Fix vendored dependencies

Quentin Machu 2016-10-17 11:43:32 +02:00
parent 24c6647234
commit 9b93c6e7fc
535 changed files with 197971 additions and 14339 deletions

Godeps/Godeps.json (generated): 857 lines changed

File diff suppressed because it is too large.


@@ -0,0 +1 @@
*.coverprofile


@@ -0,0 +1,12 @@
language: go
go:
- 1.4.1
install:
- go get -t -v ./...
- go install github.com/onsi/ginkgo/ginkgo
script:
- export PATH=$HOME/gopath/bin:$PATH
- ginkgo -r -failOnPending -randomizeAllSpecs -race


@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -0,0 +1,59 @@
[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
[![GoDoc](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml?status.svg)](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml)
candiedyaml
===========
YAML for Go
A YAML 1.1 parser with support for YAML 1.2 features
Usage
-----
```go
package main

import (
	"fmt"
	"os"

	"github.com/cloudfoundry-incubator/candiedyaml"
)

func main() {
	file, err := os.Open("path/to/some/file.yml")
	if err != nil {
		println("File does not exist:", err.Error())
		os.Exit(1)
	}
	defer file.Close()

	document := new(interface{})
	decoder := candiedyaml.NewDecoder(file)
	err = decoder.Decode(document)
	if err != nil {
		println("Failed to decode document:", err.Error())
	}
	println("parsed yml into interface:", fmt.Sprintf("%#v", document))

	fileToWrite, err := os.Create("path/to/some/new/file.yml")
	if err != nil {
		println("Failed to open file for writing:", err.Error())
		os.Exit(1)
	}
	defer fileToWrite.Close()

	encoder := candiedyaml.NewEncoder(fileToWrite)
	err = encoder.Encode(document)
	if err != nil {
		println("Failed to encode document:", err.Error())
		os.Exit(1)
	}
}
```
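
The example above round-trips a file through an untyped `interface{}`. The package also exposes `Marshal` and `Unmarshal` helpers (defined in the encoder and decoder sources below) that work directly on typed values. A minimal sketch, assuming the field mapping honours `yaml:"..."` struct tags with an `omitempty` option; the `Config` type is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/cloudfoundry-incubator/candiedyaml"
)

// Config is a hypothetical type used only to illustrate struct round-tripping.
type Config struct {
	Name  string   `yaml:"name"`
	Ports []int    `yaml:"ports"`
	Tags  []string `yaml:"tags,omitempty"`
}

func main() {
	in := Config{Name: "api", Ports: []int{80, 443}}

	// Marshal walks the value with reflection and emits a YAML document.
	out, err := candiedyaml.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)

	// Unmarshal parses the document back into a typed value.
	var back Config
	if err := candiedyaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", back)
}
```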


@@ -0,0 +1,834 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Create a new parser object.
*/
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
}
return true
}
/*
* Destroy a parser object.
*/
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
/*
* String read handler.
*/
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n := copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
/*
* File read handler.
*/
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
return parser.input_reader.Read(buffer)
}
/*
* Set a string input.
*/
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
/*
* Set a reader input
*/
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = yaml_file_read_handler
parser.input_reader = reader
}
/*
* Set a generic input.
*/
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = handler
}
/*
* Set the source encoding.
*/
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("encoding already set")
}
parser.encoding = encoding
}
/*
* Create a new emitter object.
*/
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, OUTPUT_BUFFER_SIZE),
raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
}
}
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
/*
* String write handler.
*/
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
/*
* File write handler.
*/
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
/*
* Set a string output.
*/
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = buffer
}
/*
* Set a file output.
*/
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
/*
* Set a generic output handler.
*/
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = handler
}
/*
* Set the output encoding.
*/
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("encoding already set")
}
emitter.encoding = encoding
}
/*
* Set the canonical output style.
*/
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
/*
* Set the indentation increment.
*/
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
/*
* Set the preferred line width.
*/
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
/*
* Set if unescaped non-ASCII characters are allowed.
*/
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
/*
* Set the preferred line break character.
*/
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
/*
* Destroy a token object.
*/
// yaml_DECLARE(void)
// yaml_token_delete(yaml_token_t *token)
// {
// assert(token); /* Non-NULL token object expected. */
//
// switch (token.type)
// {
// case yaml_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case yaml_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case yaml_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case yaml_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case yaml_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
// }
/*
* Check if a string is a valid UTF-8 sequence.
*
* Check 'reader.c' for more details on UTF-8 encoding.
*/
// static int
// yaml_check_utf8(yaml_char_t *start, size_t length)
// {
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
// }
/*
* Create STREAM-START.
*/
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
event_type: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
/*
* Create STREAM-END.
*/
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_STREAM_END_EVENT,
}
}
/*
* Create DOCUMENT-START.
*/
func yaml_document_start_event_initialize(event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool) {
*event = yaml_event_t{
event_type: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
/*
* Create DOCUMENT-END.
*/
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
event_type: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
/*
* Create ALIAS.
*/
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
*event = yaml_event_t{
event_type: yaml_ALIAS_EVENT,
anchor: anchor,
}
}
/*
* Create SCALAR.
*/
func yaml_scalar_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte,
value []byte,
plain_implicit bool, quoted_implicit bool,
style yaml_scalar_style_t) {
*event = yaml_event_t{
event_type: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
}
/*
* Create SEQUENCE-START.
*/
func yaml_sequence_start_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
*event = yaml_event_t{
event_type: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
/*
* Create SEQUENCE-END.
*/
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_SEQUENCE_END_EVENT,
}
}
/*
* Create MAPPING-START.
*/
func yaml_mapping_start_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
event_type: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
/*
* Create MAPPING-END.
*/
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_MAPPING_END_EVENT,
}
}
/*
* Destroy an event object.
*/
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
// /*
// * Create a document object.
// */
//
// func yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives []yaml_tag_directive_t,
// start_implicit, end_implicit bool) bool {
//
//
// {
// struct {
// YAML_error_type_t error;
// } context;
// struct {
// yaml_node_t *start;
// yaml_node_t *end;
// yaml_node_t *top;
// } nodes = { NULL, NULL, NULL };
// yaml_version_directive_t *version_directive_copy = NULL;
// struct {
// yaml_tag_directive_t *start;
// yaml_tag_directive_t *end;
// yaml_tag_directive_t *top;
// } tag_directives_copy = { NULL, NULL, NULL };
// yaml_tag_directive_t value = { NULL, NULL };
// YAML_mark_t mark = { 0, 0, 0 };
//
// assert(document); /* Non-NULL document object is expected. */
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end));
// /* Valid tag directives are expected. */
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
// if (!version_directive_copy) goto error;
// version_directive_copy.major = version_directive.major;
// version_directive_copy.minor = version_directive.minor;
// }
//
// if (tag_directives_start != tag_directives_end) {
// yaml_tag_directive_t *tag_directive;
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error;
// for (tag_directive = tag_directives_start;
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle);
// assert(tag_directive.prefix);
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error;
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error;
// value.handle = yaml_strdup(tag_directive.handle);
// value.prefix = yaml_strdup(tag_directive.prefix);
// if (!value.handle || !value.prefix) goto error;
// if (!PUSH(&context, tag_directives_copy, value))
// goto error;
// value.handle = NULL;
// value.prefix = NULL;
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark);
//
// return 1;
//
// error:
// STACK_DEL(&context, nodes);
// yaml_free(version_directive_copy);
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
// }
// STACK_DEL(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
//
// return 0;
// }
//
// /*
// * Destroy a document object.
// */
//
// yaml_DECLARE(void)
// yaml_document_delete(document *yaml_document_t)
// {
// struct {
// YAML_error_type_t error;
// } context;
// yaml_tag_directive_t *tag_directive;
//
// context.error = yaml_NO_ERROR; /* Eliminate a compiler warning. */
//
// assert(document); /* Non-NULL document object is expected. */
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// yaml_node_t node = POP(&context, document.nodes);
// yaml_free(node.tag);
// switch (node.type) {
// case yaml_SCALAR_NODE:
// yaml_free(node.data.scalar.value);
// break;
// case yaml_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items);
// break;
// case yaml_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs);
// break;
// default:
// assert(0); /* Should not happen. */
// }
// }
// STACK_DEL(&context, document.nodes);
//
// yaml_free(document.version_directive);
// for (tag_directive = document.tag_directives.start;
// tag_directive != document.tag_directives.end;
// tag_directive++) {
// yaml_free(tag_directive.handle);
// yaml_free(tag_directive.prefix);
// }
// yaml_free(document.tag_directives.start);
//
// memset(document, 0, sizeof(yaml_document_t));
// }
//
// /**
// * Get a document node.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_node(document *yaml_document_t, int index)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1;
// }
// return NULL;
// }
//
// /**
// * Get the root object.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_root_node(document *yaml_document_t)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start;
// }
// return NULL;
// }
//
// /*
// * Add a scalar node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_scalar(document *yaml_document_t,
// yaml_char_t *tag, yaml_char_t *value, int length,
// yaml_scalar_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// yaml_char_t *value_copy = NULL;
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
// assert(value); /* Non-NULL value is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (length < 0) {
// length = strlen((char *)value);
// }
//
// if (!yaml_check_utf8(value, length)) goto error;
// value_copy = yaml_malloc(length+1);
// if (!value_copy) goto error;
// memcpy(value_copy, value, length);
// value_copy[length] = '\0';
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// yaml_free(tag_copy);
// yaml_free(value_copy);
//
// return 0;
// }
//
// /*
// * Add a sequence node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_sequence(document *yaml_document_t,
// yaml_char_t *tag, yaml_sequence_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_item_t *start;
// yaml_node_item_t *end;
// yaml_node_item_t *top;
// } items = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, items);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Add a mapping node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_mapping(document *yaml_document_t,
// yaml_char_t *tag, yaml_mapping_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_pair_t *start;
// yaml_node_pair_t *end;
// yaml_node_pair_t *top;
// } pairs = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, pairs);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Append an item to a sequence node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_sequence_item(document *yaml_document_t,
// int sequence, int item)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// assert(document); /* Non-NULL document is required. */
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top);
// /* Valid sequence id is required. */
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
// /* A sequence node is required. */
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
// /* Valid item id is required. */
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0;
//
// return 1;
// }
//
// /*
// * Append a pair of a key and a value to a mapping node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_mapping_pair(document *yaml_document_t,
// int mapping, int key, int value)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// yaml_node_pair_t pair;
//
// assert(document); /* Non-NULL document is required. */
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top);
// /* Valid mapping id is required. */
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
// /* A mapping node is required. */
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
// /* Valid key id is required. */
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
// /* Valid value id is required. */
//
// pair.key = key;
// pair.value = value;
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0;
//
// return 1;
// }
//
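
The initializers above make up the low-level, libyaml-style API that the Encoder and Decoder further down are built on. A minimal sketch of how they chain together; `emitScalarDoc` and its error text are invented for illustration, and because these functions are unexported the code would have to live inside package candiedyaml itself (for example in a test file):

```go
package candiedyaml

import "fmt"

// emitScalarDoc (hypothetical) drives the unexported emitter API directly:
// it points the emitter at an in-memory buffer and feeds it a hand-built
// event stream for a document containing a single scalar.
func emitScalarDoc(value string) ([]byte, error) {
	var out []byte
	var emitter yaml_emitter_t
	var event yaml_event_t

	yaml_emitter_initialize(&emitter)
	yaml_emitter_set_output_string(&emitter, &out)

	emit := func() error {
		if !yaml_emitter_emit(&emitter, &event) {
			return fmt.Errorf("emit failed")
		}
		return nil
	}

	// Same event sequence the Encoder produces: stream start, document start,
	// one scalar, document end, stream end.
	yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING)
	if err := emit(); err != nil {
		return nil, err
	}
	yaml_document_start_event_initialize(&event, nil, nil, true)
	if err := emit(); err != nil {
		return nil, err
	}
	yaml_scalar_event_initialize(&event, nil, nil, []byte(value), true, true, yaml_PLAIN_SCALAR_STYLE)
	if err := emit(); err != nil {
		return nil, err
	}
	yaml_document_end_event_initialize(&event, true)
	if err := emit(); err != nil {
		return nil, err
	}
	yaml_stream_end_event_initialize(&event)
	if err := emit(); err != nil {
		return nil, err
	}
	return out, nil
}
```

This mirrors the event sequence that NewEncoder and Encoder.Encode emit in the encoder source below.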


@@ -0,0 +1,622 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"runtime"
"strconv"
"strings"
)
type Unmarshaler interface {
UnmarshalYAML(tag string, value interface{}) error
}
// A Number represents a YAML number literal.
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
type Decoder struct {
parser yaml_parser_t
event yaml_event_t
replay_events []yaml_event_t
useNumber bool
anchors map[string][]yaml_event_t
tracking_anchors [][]yaml_event_t
}
type ParserError struct {
ErrorType YAML_error_type_t
Context string
ContextMark YAML_mark_t
Problem string
ProblemMark YAML_mark_t
}
func (e *ParserError) Error() string {
return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
}
type UnexpectedEventError struct {
Value string
EventType yaml_event_type_t
At YAML_mark_t
}
func (e *UnexpectedEventError) Error() string {
return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
}
func recovery(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
var tmpError error
switch r := r.(type) {
case error:
tmpError = r
case string:
tmpError = errors.New(r)
default:
tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String())
}
*err = tmpError
}
}
func Unmarshal(data []byte, v interface{}) error {
d := NewDecoder(bytes.NewBuffer(data))
return d.Decode(v)
}
func NewDecoder(r io.Reader) *Decoder {
d := &Decoder{
anchors: make(map[string][]yaml_event_t),
tracking_anchors: make([][]yaml_event_t, 1),
}
yaml_parser_initialize(&d.parser)
yaml_parser_set_input_reader(&d.parser, r)
return d
}
func (d *Decoder) Decode(v interface{}) (err error) {
defer recovery(&err)
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
}
if d.event.event_type == yaml_NO_EVENT {
d.nextEvent()
if d.event.event_type != yaml_STREAM_START_EVENT {
return errors.New("Invalid stream")
}
d.nextEvent()
}
d.document(rv)
return nil
}
func (d *Decoder) UseNumber() { d.useNumber = true }
func (d *Decoder) error(err error) {
panic(err)
}
func (d *Decoder) nextEvent() {
if d.event.event_type == yaml_STREAM_END_EVENT {
d.error(errors.New("The stream is closed"))
}
if d.replay_events != nil {
d.event = d.replay_events[0]
if len(d.replay_events) == 1 {
d.replay_events = nil
} else {
d.replay_events = d.replay_events[1:]
}
} else {
if !yaml_parser_parse(&d.parser, &d.event) {
yaml_event_delete(&d.event)
d.error(&ParserError{
ErrorType: d.parser.error,
Context: d.parser.context,
ContextMark: d.parser.context_mark,
Problem: d.parser.problem,
ProblemMark: d.parser.problem_mark,
})
}
}
last := len(d.tracking_anchors)
// skip aliases when tracking an anchor
if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
}
}
func (d *Decoder) document(rv reflect.Value) {
if d.event.event_type != yaml_DOCUMENT_START_EVENT {
d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
}
d.nextEvent()
d.parse(rv)
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
}
d.nextEvent()
}
func (d *Decoder) parse(rv reflect.Value) {
if !rv.IsValid() {
// skip ahead since we cannot store
d.valueInterface()
return
}
anchor := string(d.event.anchor)
switch d.event.event_type {
case yaml_SEQUENCE_START_EVENT:
d.begin_anchor(anchor)
d.sequence(rv)
d.end_anchor(anchor)
case yaml_MAPPING_START_EVENT:
d.begin_anchor(anchor)
d.mapping(rv)
d.end_anchor(anchor)
case yaml_SCALAR_EVENT:
d.begin_anchor(anchor)
d.scalar(rv)
d.end_anchor(anchor)
case yaml_ALIAS_EVENT:
d.alias(rv)
case yaml_DOCUMENT_END_EVENT:
default:
d.error(&UnexpectedEventError{
Value: string(d.event.value),
EventType: d.event.event_type,
At: d.event.start_mark,
})
}
}
func (d *Decoder) begin_anchor(anchor string) {
if anchor != "" {
events := []yaml_event_t{d.event}
d.tracking_anchors = append(d.tracking_anchors, events)
}
}
func (d *Decoder) end_anchor(anchor string) {
if anchor != "" {
events := d.tracking_anchors[len(d.tracking_anchors)-1]
d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
// remove the anchor, replaying events shouldn't have anchors
events[0].anchor = nil
// we went one too many, remove the extra event
events = events[:len(events)-1]
// if nested, append to all the other anchors
for i, e := range d.tracking_anchors {
d.tracking_anchors[i] = append(e, events...)
}
d.anchors[anchor] = events
}
}
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(Unmarshaler); ok {
var temp interface{}
return u, reflect.ValueOf(&temp)
}
}
v = v.Elem()
}
return nil, v
}
func (d *Decoder) sequence(v reflect.Value) {
if d.event.event_type != yaml_SEQUENCE_START_EVENT {
d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
}
u, pv := d.indirect(v, false)
if u != nil {
defer func() {
if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, false)
}
v = pv
// Check type of target.
switch v.Kind() {
case reflect.Interface:
if v.NumMethod() == 0 {
// Decoding into nil interface? Switch to non-reflect code.
v.Set(reflect.ValueOf(d.sequenceInterface()))
return
}
// Otherwise it's invalid.
fallthrough
default:
d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
case reflect.Array:
case reflect.Slice:
break
}
d.nextEvent()
i := 0
done:
for {
switch d.event.event_type {
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
// Get element of array, growing if necessary.
if v.Kind() == reflect.Slice {
// Grow slice if necessary
if i >= v.Cap() {
newcap := v.Cap() + v.Cap()/2
if newcap < 4 {
newcap = 4
}
newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
reflect.Copy(newv, v)
v.Set(newv)
}
if i >= v.Len() {
v.SetLen(i + 1)
}
}
if i < v.Len() {
// Decode into element.
d.parse(v.Index(i))
} else {
// Ran out of fixed array: skip.
d.parse(reflect.Value{})
}
i++
}
if i < v.Len() {
if v.Kind() == reflect.Array {
// Array. Zero the rest.
z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
v.Index(i).Set(z)
}
} else {
v.SetLen(i)
}
}
if i == 0 && v.Kind() == reflect.Slice {
v.Set(reflect.MakeSlice(v.Type(), 0, 0))
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
}
func (d *Decoder) mapping(v reflect.Value) {
u, pv := d.indirect(v, false)
if u != nil {
defer func() {
if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, false)
}
v = pv
// Decoding into nil interface? Switch to non-reflect code.
if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
v.Set(reflect.ValueOf(d.mappingInterface()))
return
}
// Check type of target: struct or map[X]Y
switch v.Kind() {
case reflect.Struct:
d.mappingStruct(v)
return
case reflect.Map:
default:
d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
}
mapt := v.Type()
if v.IsNil() {
v.Set(reflect.MakeMap(mapt))
}
d.nextEvent()
keyt := mapt.Key()
mapElemt := mapt.Elem()
var mapElem reflect.Value
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT:
break done
case yaml_DOCUMENT_END_EVENT:
return
}
key := reflect.New(keyt)
d.parse(key.Elem())
if !mapElem.IsValid() {
mapElem = reflect.New(mapElemt).Elem()
} else {
mapElem.Set(reflect.Zero(mapElemt))
}
d.parse(mapElem)
v.SetMapIndex(key.Elem(), mapElem)
}
d.nextEvent()
}
func (d *Decoder) mappingStruct(v reflect.Value) {
structt := v.Type()
fields := cachedTypeFields(structt)
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT:
break done
case yaml_DOCUMENT_END_EVENT:
return
}
key := ""
d.parse(reflect.ValueOf(&key))
// Figure out field corresponding to key.
var subv reflect.Value
var f *field
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv = v
for _, i := range f.index {
if subv.Kind() == reflect.Ptr {
if subv.IsNil() {
subv.Set(reflect.New(subv.Type().Elem()))
}
subv = subv.Elem()
}
subv = subv.Field(i)
}
}
d.parse(subv)
}
d.nextEvent()
}
func (d *Decoder) scalar(v reflect.Value) {
val := string(d.event.value)
wantptr := null_values[val]
u, pv := d.indirect(v, wantptr)
var tag string
if u != nil {
defer func() {
if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, wantptr)
}
v = pv
var err error
tag, err = resolve(d.event, v, d.useNumber)
if err != nil {
d.error(err)
}
d.nextEvent()
}
func (d *Decoder) alias(rv reflect.Value) {
val, ok := d.anchors[string(d.event.anchor)]
if !ok {
d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
}
d.replay_events = val
d.nextEvent()
d.parse(rv)
}
func (d *Decoder) valueInterface() interface{} {
var v interface{}
anchor := string(d.event.anchor)
switch d.event.event_type {
case yaml_SEQUENCE_START_EVENT:
d.begin_anchor(anchor)
v = d.sequenceInterface()
case yaml_MAPPING_START_EVENT:
d.begin_anchor(anchor)
v = d.mappingInterface()
case yaml_SCALAR_EVENT:
d.begin_anchor(anchor)
v = d.scalarInterface()
case yaml_ALIAS_EVENT:
rv := reflect.ValueOf(&v)
d.alias(rv)
return v
case yaml_DOCUMENT_END_EVENT:
d.error(&UnexpectedEventError{
Value: string(d.event.value),
EventType: d.event.event_type,
At: d.event.start_mark,
})
}
d.end_anchor(anchor)
return v
}
func (d *Decoder) scalarInterface() interface{} {
_, v := resolveInterface(d.event, d.useNumber)
d.nextEvent()
return v
}
// sequenceInterface is like sequence but returns []interface{}.
func (d *Decoder) sequenceInterface() []interface{} {
var v = make([]interface{}, 0)
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
v = append(v, d.valueInterface())
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
return v
}
// mappingInterface is like mapping but returns map[interface{}]interface{}.
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
m := make(map[interface{}]interface{})
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
key := d.valueInterface()
// Read value.
m[key] = d.valueInterface()
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
return m
}
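
The begin_anchor/end_anchor bookkeeping and the replay_events queue above are what make aliases decode transparently: every event seen while an `&name` anchor is open is recorded, and a later `*name` alias simply replays that recorded stream through the same parse path. A small usage sketch of the effect (document contents and variable names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/cloudfoundry-incubator/candiedyaml"
)

func main() {
	// "copy" is an alias of the "defaults" mapping; the decoder replays the
	// recorded events, so both keys decode to equal values.
	doc := []byte(`defaults: &d
  retries: 3
  timeout: 30
copy: *d
`)

	var out map[string]interface{}
	if err := candiedyaml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["defaults"], out["copy"])
}
```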

File diff suppressed because it is too large.


@@ -0,0 +1,395 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"io"
"math"
"reflect"
"regexp"
"sort"
"strconv"
"time"
)
var (
timeTimeType = reflect.TypeOf(time.Time{})
marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
numberType = reflect.TypeOf(Number(""))
nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")
shortTags = map[string]string{
yaml_NULL_TAG: "!!null",
yaml_BOOL_TAG: "!!bool",
yaml_STR_TAG: "!!str",
yaml_INT_TAG: "!!int",
yaml_FLOAT_TAG: "!!float",
yaml_TIMESTAMP_TAG: "!!timestamp",
yaml_SEQ_TAG: "!!seq",
yaml_MAP_TAG: "!!map",
yaml_BINARY_TAG: "!!binary",
}
)
type Marshaler interface {
MarshalYAML() (tag string, value interface{}, err error)
}
// An Encoder writes YAML documents to an output stream.
type Encoder struct {
w io.Writer
emitter yaml_emitter_t
event yaml_event_t
flow bool
err error
}
func Marshal(v interface{}) ([]byte, error) {
b := bytes.Buffer{}
e := NewEncoder(&b)
err := e.Encode(v)
return b.Bytes(), err
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
e := &Encoder{w: w}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, e.w)
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
return e
}
func (e *Encoder) Encode(v interface{}) (err error) {
defer recovery(&err)
if e.err != nil {
return e.err
}
e.marshal("", reflect.ValueOf(v), true)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
return nil
}
func (e *Encoder) emit() {
if !yaml_emitter_emit(&e.emitter, &e.event) {
panic("bad emit")
}
}
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
vt := v.Type()
if vt.Implements(marshalerType) {
e.emitMarshaler(tag, v)
return
}
if vt.Kind() != reflect.Ptr && allowAddr {
if reflect.PtrTo(vt).Implements(marshalerType) {
e.emitAddrMarshaler(tag, v)
return
}
}
switch v.Kind() {
case reflect.Interface:
if v.IsNil() {
e.emitNil()
} else {
e.marshal(tag, v.Elem(), allowAddr)
}
case reflect.Map:
e.emitMap(tag, v)
case reflect.Ptr:
if v.IsNil() {
e.emitNil()
} else {
e.marshal(tag, v.Elem(), true)
}
case reflect.Struct:
e.emitStruct(tag, v)
case reflect.Slice:
e.emitSlice(tag, v)
case reflect.String:
e.emitString(tag, v)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.emitInt(tag, v)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.emitUint(tag, v)
case reflect.Float32, reflect.Float64:
e.emitFloat(tag, v)
case reflect.Bool:
e.emitBool(tag, v)
default:
panic("Can't marshal type yet: " + v.Type().String())
}
}
func (e *Encoder) emitMap(tag string, v reflect.Value) {
e.mapping(tag, func() {
var keys stringValues = v.MapKeys()
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k, true)
e.marshal("", v.MapIndex(k), true)
}
})
}
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
if v.Type() == timeTimeType {
e.emitTime(tag, v)
return
}
fields := cachedTypeFields(v.Type())
e.mapping(tag, func() {
for _, f := range fields {
fv := fieldByIndex(v, f.index)
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
continue
}
e.marshal("", reflect.ValueOf(f.name), true)
e.flow = f.flow
e.marshal("", fv, true)
}
})
}
func (e *Encoder) emitTime(tag string, v reflect.Value) {
t := v.Interface().(time.Time)
bytes, _ := t.MarshalText()
e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *Encoder) mapping(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
if v.Type() == byteSliceType {
e.emitBase64(tag, v)
return
}
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
n := v.Len()
for i := 0; i < n; i++ {
e.marshal("", v.Index(i), true)
}
yaml_sequence_end_event_initialize(&e.event)
e.emit()
}
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
if v.IsNil() {
e.emitNil()
return
}
s := v.Bytes()
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
base64.StdEncoding.Encode(dst, s)
e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
}
func (e *Encoder) emitString(tag string, v reflect.Value) {
var style yaml_scalar_style_t
s := v.String()
if nonPrintable.MatchString(s) {
e.emitBase64(tag, v)
return
}
if v.Type() == numberType {
style = yaml_PLAIN_SCALAR_STYLE
} else {
event := yaml_event_t{
implicit: true,
value: []byte(s),
}
rtag, _ := resolveInterface(event, false)
if tag == "" && rtag != yaml_STR_TAG {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if multiline.MatchString(s) {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
}
e.emitScalar(s, "", tag, style)
}
func (e *Encoder) emitBool(tag string, v reflect.Value) {
s := strconv.FormatBool(v.Bool())
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitInt(tag string, v reflect.Value) {
s := strconv.FormatInt(v.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitUint(tag string, v reflect.Value) {
s := strconv.FormatUint(v.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
f := v.Float()
var s string
switch {
case math.IsNaN(f):
s = ".nan"
case math.IsInf(f, 1):
s = "+.inf"
case math.IsInf(f, -1):
s = "-.inf"
default:
s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitNil() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
if !implicit {
style = yaml_PLAIN_SCALAR_STYLE
}
stag := shortTags[tag]
if stag == "" {
stag = tag
}
yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
e.emit()
}
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.emitNil()
return
}
m := v.Interface().(Marshaler)
if m == nil {
e.emitNil()
return
}
t, val, err := m.MarshalYAML()
if err != nil {
panic(err)
}
if val == nil {
e.emitNil()
return
}
e.marshal(t, reflect.ValueOf(val), false)
}
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
if !v.CanAddr() {
e.marshal(tag, v, false)
return
}
va := v.Addr()
if va.IsNil() {
e.emitNil()
return
}
// The addressable pointer (va) is what implements Marshaler here, not the value itself.
m := va.Interface().(Marshaler)
t, val, err := m.MarshalYAML()
if err != nil {
panic(err)
}
if val == nil {
e.emitNil()
return
}
e.marshal(t, reflect.ValueOf(val), false)
}
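
Types can hook into this encoder by implementing the Marshaler interface declared near the top of the file; emitMarshaler and emitAddrMarshaler feed the returned tag and value straight back into marshal. A minimal sketch (Celsius is a hypothetical type, and the exact rendering of the scalar is left to the emitter):

```go
package main

import (
	"fmt"

	"github.com/cloudfoundry-incubator/candiedyaml"
)

// Celsius is a hypothetical type; MarshalYAML lets it control its own rendering.
type Celsius float64

func (c Celsius) MarshalYAML() (string, interface{}, error) {
	// An empty tag lets the emitter resolve one implicitly for the returned value.
	return "", fmt.Sprintf("%.1f degC", float64(c)), nil
}

func main() {
	out, err := candiedyaml.Marshal(map[string]Celsius{"cpu": 71.5})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}
```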


@@ -0,0 +1,19 @@
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

File diff suppressed because it is too large.


@@ -0,0 +1,465 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Set the reader error and return false.
*/
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
/*
* Byte order marks.
*/
const (
BOM_UTF8 = "\xef\xbb\xbf"
BOM_UTF16LE = "\xff\xfe"
BOM_UTF16BE = "\xfe\xff"
)
/*
* Determine the input stream encoding by checking the BOM symbol. If no BOM is
* found, the UTF-8 encoding is assumed. Returns true on success, false on failure.
*/
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
/* Ensure that we had enough bytes in the raw buffer. */
for !parser.eof &&
len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
/* Determine the encoding. */
raw := parser.raw_buffer
pos := parser.raw_buffer_pos
remaining := len(raw) - pos
if remaining >= 2 &&
raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if remaining >= 2 &&
raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if remaining >= 3 &&
raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
/*
* Update the raw buffer.
*/
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
/* Return if the raw buffer is full. */
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
/* Return on EOF. */
if parser.eof {
return true
}
/* Move the remaining bytes in the raw buffer to the beginning. */
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
/* Call the read handler to fill the buffer. */
size_read, err := parser.read_handler(parser,
parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
parser.offset, -1)
}
return true
}
/*
* Ensure that the buffer contains at least `length` characters.
* Returns true on success, false on failure.
*
* The length is supposed to be significantly less than the buffer size.
*/
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
/* Read handler must be set. */
if parser.read_handler == nil {
panic("read handler must be set")
}
/* If the EOF flag is set and the raw buffer is empty, do nothing. */
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
/* Return if the buffer contains enough characters. */
if parser.unread >= length {
return true
}
/* Determine the input encoding if it is not known yet. */
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
/* Move the unread characters to the beginning of the buffer. */
buffer_end := len(parser.buffer)
if 0 < parser.buffer_pos &&
parser.buffer_pos < buffer_end {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_end -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_end {
buffer_end = 0
parser.buffer_pos = 0
}
parser.buffer = parser.buffer[:cap(parser.buffer)]
/* Fill the buffer until it has enough characters. */
first := true
for parser.unread < length {
/* Fill the raw buffer if necessary. */
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_end]
return false
}
}
first = false
/* Decode the raw buffer. */
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var w int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
incomplete := false
/* Decode the next character. */
switch parser.encoding {
case yaml_UTF8_ENCODING:
/*
* Decode a UTF-8 character. Check RFC 3629
* (http://www.ietf.org/rfc/rfc3629.txt) for more details.
*
* The following table (taken from the RFC) is used for
* decoding.
*
* Char. number range | UTF-8 octet sequence
* (hexadecimal) | (binary)
* --------------------+------------------------------------
* 0000 0000-0000 007F | 0xxxxxxx
* 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
* 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
* 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
*
* Additionally, the characters in the range 0xD800-0xDFFF
* are prohibited as they are reserved for use with UTF-16
* surrogate pairs.
*/
/* Determine the length of the UTF-8 sequence. */
octet := parser.raw_buffer[parser.raw_buffer_pos]
w = width(octet)
/* Check if the leading octet is valid. */
if w == 0 {
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
/* Check if the raw buffer contains an incomplete character. */
if w > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
incomplete = true
break
}
/* Decode the leading octet. */
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
/* Check and decode the trailing octets. */
for k := 1; k < w; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
/* Check if the octet is valid. */
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
/* Decode the octet. */
value = (value << 6) + rune(octet&0x3F)
}
/* Check the length of the sequence against the value. */
switch {
case w == 1:
case w == 2 && value >= 0x80:
case w == 3 && value >= 0x800:
case w == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
/* Check the range of the value. */
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING,
yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
/*
* The UTF-16 encoding is not as simple as one might
* naively think. Check RFC 2781
* (http://www.ietf.org/rfc/rfc2781.txt).
*
* Normally, two subsequent bytes describe a Unicode
* character. However a special technique (called a
* surrogate pair) is used for specifying character
* values larger than 0xFFFF.
*
* A surrogate pair consists of two pseudo-characters:
* high surrogate area (0xD800-0xDBFF)
* low surrogate area (0xDC00-0xDFFF)
*
* The following formulas are used for decoding
* and encoding characters using surrogate pairs:
*
* U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
* U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
* W1 = 110110yyyyyyyyyy
* W2 = 110111xxxxxxxxxx
*
* where U is the character value, W1 is the high surrogate
* area, W2 is the low surrogate area.
*/
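/*
 * Worked example (for illustration): the pair W1 = 0xD801, W2 = 0xDC37
 * decodes to U = 0x10000 + ((0xD801 & 0x3FF) << 10) + (0xDC37 & 0x3FF)
 * = 0x10437, which is exactly what the surrogate-pair code below computes.
 */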
/* Check for incomplete UTF-16 character. */
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the character. */
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
/* Check for unexpected low surrogate area. */
if (value & 0xFC00) == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
/* Check for a high surrogate area. */
if (value & 0xFC00) == 0xD800 {
w = 4
/* Check for incomplete surrogate pair. */
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the next character. */
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
/* Check for a low surrogate area. */
if (value2 & 0xFC00) != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
/* Generate the value of the surrogate pair. */
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
w = 2
}
break
default:
panic("Impossible") /* Impossible. */
}
/* Check if the raw buffer contains enough bytes to form a character. */
if incomplete {
break
}
/*
* Check if the character is in the allowed range:
* #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
* | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
* | [#x10000-#x10FFFF] (32 bit)
*/
if !(value == 0x09 || value == 0x0A || value == 0x0D ||
(value >= 0x20 && value <= 0x7E) ||
(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
(value >= 0xE000 && value <= 0xFFFD) ||
(value >= 0x10000 && value <= 0x10FFFF)) {
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
/* Move the raw pointers. */
parser.raw_buffer_pos += w
parser.offset += w
/* Finally put the character into the buffer. */
/* 0000 0000-0000 007F . 0xxxxxxx */
if value <= 0x7F {
parser.buffer[buffer_end] = byte(value)
} else if value <= 0x7FF {
/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
} else if value <= 0xFFFF {
/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
} else {
/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
}
buffer_end += w
parser.unread++
}
/* On EOF, put NUL into the buffer and return. */
if parser.eof {
parser.buffer[buffer_end] = 0
buffer_end++
parser.buffer = parser.buffer[:buffer_end]
parser.unread++
return true
}
}
parser.buffer = parser.buffer[:buffer_end]
return true
}

View File

@ -0,0 +1,449 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"fmt"
"math"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
var byteSliceType = reflect.TypeOf([]byte(nil))
var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}
var bool_values map[string]bool
var null_values map[string]bool
var signs = []byte{'-', '+'}
var nulls = []byte{'~', 'n', 'N'}
var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}
var timestamp_regexp *regexp.Regexp
var ymd_regexp *regexp.Regexp
func init() {
bool_values = make(map[string]bool)
bool_values["y"] = true
bool_values["yes"] = true
bool_values["n"] = false
bool_values["no"] = false
bool_values["true"] = true
bool_values["false"] = false
bool_values["on"] = true
bool_values["off"] = false
null_values = make(map[string]bool)
null_values["~"] = true
null_values["null"] = true
null_values["Null"] = true
null_values["NULL"] = true
timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
}
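// For illustration: "2002-12-14" matches ymd_regexp, while a full YAML 1.1
// timestamp such as "2001-12-14 21:59:43.10 -5" matches timestamp_regexp,
// capturing the date, time, fractional-second, and timezone offset groups.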
func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
val := string(event.value)
if null_values[val] {
v.Set(reflect.Zero(v.Type()))
return yaml_NULL_TAG, nil
}
switch v.Kind() {
case reflect.String:
if useNumber && v.Type() == numberType {
tag, i := resolveInterface(event, useNumber)
if n, ok := i.(Number); ok {
v.Set(reflect.ValueOf(n))
return tag, nil
}
return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
}
return resolve_string(val, v, event)
case reflect.Bool:
return resolve_bool(val, v, event)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return resolve_int(val, v, useNumber, event)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return resolve_uint(val, v, useNumber, event)
case reflect.Float32, reflect.Float64:
return resolve_float(val, v, useNumber, event)
case reflect.Interface:
_, i := resolveInterface(event, useNumber)
if i != nil {
v.Set(reflect.ValueOf(i))
} else {
v.Set(reflect.Zero(v.Type()))
}
case reflect.Struct:
return resolve_time(val, v, event)
case reflect.Slice:
if v.Type() != byteSliceType {
return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
}
b, err := decode_binary(event.value, event)
if err != nil {
return "", err
}
v.Set(reflect.ValueOf(b))
default:
return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
}
return yaml_STR_TAG, nil
}
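// For example, resolving the scalar "42" into an int field sets it to 42 and
// returns yaml_INT_TAG, while resolving "~" into any value zeroes it and
// returns yaml_NULL_TAG.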
func hasBinaryTag(event yaml_event_t) bool {
for _, tag := range binary_tags {
if bytes.Equal(event.tag, tag) {
return true
}
}
return false
}
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
n, err := base64.StdEncoding.Decode(b, value)
if err != nil {
return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
}
return b[:n], nil
}
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
if len(event.tag) > 0 {
if hasBinaryTag(event) {
b, err := decode_binary(event.value, event)
if err != nil {
return "", err
}
val = string(b)
}
}
v.SetString(val)
return yaml_STR_TAG, nil
}
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
b, found := bool_values[strings.ToLower(val)]
if !found {
return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
}
v.SetBool(b)
return yaml_BOOL_TAG, nil
}
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
original := val
val = strings.Replace(val, "_", "", -1)
var value uint64
isNumberValue := v.Type() == numberType
sign := int64(1)
if val[0] == '-' {
sign = -1
val = val[1:]
} else if val[0] == '+' {
val = val[1:]
}
base := 0
if val == "0" {
if isNumberValue {
v.SetString("0")
} else {
v.Set(reflect.Zero(v.Type()))
}
return yaml_INT_TAG, nil
}
if strings.HasPrefix(val, "0o") {
base = 8
val = val[2:]
}
value, err := strconv.ParseUint(val, base, 64)
if err != nil {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
var val64 int64
if value <= math.MaxInt64 {
val64 = int64(value)
if sign == -1 {
val64 = -val64
}
} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
val64 = math.MinInt64
} else {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
if isNumberValue {
v.SetString(strconv.FormatInt(val64, 10))
} else {
if v.OverflowInt(val64) {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
v.SetInt(val64)
}
return yaml_INT_TAG, nil
}
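// For example, "0o17" resolves to 15 (the 0o prefix selects base 8), and
// underscores are stripped before parsing, so "1_000" resolves to 1000.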
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
original := val
val = strings.Replace(val, "_", "", -1)
var value uint64
isNumberValue := v.Type() == numberType
if val[0] == '-' {
return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
}
if val[0] == '+' {
val = val[1:]
}
base := 0
if val == "0" {
if isNumberValue {
v.SetString("0")
} else {
v.Set(reflect.Zero(v.Type()))
}
return yaml_INT_TAG, nil
}
if strings.HasPrefix(val, "0o") {
base = 8
val = val[2:]
}
value, err := strconv.ParseUint(val, base, 64)
if err != nil {
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
}
if isNumberValue {
v.SetString(strconv.FormatUint(value, 10))
} else {
if v.OverflowUint(value) {
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
}
v.SetUint(value)
}
return yaml_INT_TAG, nil
}
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
val = strings.Replace(val, "_", "", -1)
var value float64
isNumberValue := v.Type() == numberType
typeBits := 64
if !isNumberValue {
typeBits = v.Type().Bits()
}
sign := 1
if val[0] == '-' {
sign = -1
val = val[1:]
} else if val[0] == '+' {
val = val[1:]
}
valLower := strings.ToLower(val)
if valLower == ".inf" {
value = math.Inf(sign)
} else if valLower == ".nan" {
value = math.NaN()
} else {
var err error
value, err = strconv.ParseFloat(val, typeBits)
value *= float64(sign)
if err != nil {
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
}
}
if isNumberValue {
v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
} else {
if v.OverflowFloat(value) {
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
}
v.SetFloat(value)
}
return yaml_FLOAT_TAG, nil
}
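// For example, ".inf" resolves to +Inf, "-.inf" to -Inf, ".nan" to NaN, and
// "6.8523015e+5" to 685230.15.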
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
var parsedTime time.Time
matches := ymd_regexp.FindStringSubmatch(val)
if len(matches) > 0 {
year, _ := strconv.Atoi(matches[1])
month, _ := strconv.Atoi(matches[2])
day, _ := strconv.Atoi(matches[3])
parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
} else {
matches = timestamp_regexp.FindStringSubmatch(val)
if len(matches) == 0 {
return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
}
year, _ := strconv.Atoi(matches[1])
month, _ := strconv.Atoi(matches[2])
day, _ := strconv.Atoi(matches[3])
hour, _ := strconv.Atoi(matches[4])
min, _ := strconv.Atoi(matches[5])
sec, _ := strconv.Atoi(matches[6])
nsec := 0
if matches[7] != "" {
/* The capture is a decimal fraction of a second; right-pad it to nine digits and read it as nanoseconds. */
frac := (matches[7] + "000000000")[:9]
n, _ := strconv.Atoi(frac)
nsec = n
}
loc := time.UTC
if matches[8] != "" {
sign := matches[8][0]
hr, _ := strconv.Atoi(matches[8][1:])
min := 0
if matches[9] != "" {
min, _ = strconv.Atoi(matches[9])
}
zoneOffset := (hr*60 + min) * 60
if sign == '-' {
zoneOffset = -zoneOffset
}
loc = time.FixedZone("", zoneOffset)
}
parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
}
v.Set(reflect.ValueOf(parsedTime))
return "", nil
}
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
val := string(event.value)
if len(event.tag) == 0 && !event.implicit {
return "", val
}
if len(val) == 0 {
return yaml_NULL_TAG, nil
}
var result interface{}
sign := false
c := val[0]
switch {
case bytes.IndexByte(signs, c) != -1:
sign = true
fallthrough
case c >= '0' && c <= '9':
i := int64(0)
result = &i
if useNumber {
var n Number
result = &n
}
v := reflect.ValueOf(result).Elem()
if _, err := resolve_int(val, v, useNumber, event); err == nil {
return yaml_INT_TAG, v.Interface()
}
f := float64(0)
result = &f
if useNumber {
var n Number
result = &n
}
v = reflect.ValueOf(result).Elem()
if _, err := resolve_float(val, v, useNumber, event); err == nil {
return yaml_FLOAT_TAG, v.Interface()
}
if !sign {
t := time.Time{}
if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
return "", t
}
}
case bytes.IndexByte(nulls, c) != -1:
if null_values[val] {
return yaml_NULL_TAG, nil
}
b := false
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
return yaml_BOOL_TAG, b
}
case c == '.':
f := float64(0)
result = &f
if useNumber {
var n Number
result = &n
}
v := reflect.ValueOf(result).Elem()
if _, err := resolve_float(val, v, useNumber, event); err == nil {
return yaml_FLOAT_TAG, v.Interface()
}
case bytes.IndexByte(bools, c) != -1:
b := false
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
return yaml_BOOL_TAG, b
}
}
if hasBinaryTag(event) {
bytes, err := decode_binary(event.value, event)
if err == nil {
return yaml_BINARY_TAG, bytes
}
}
return yaml_STR_TAG, val
}

View File

@ -0,0 +1,62 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"os"
)
func Run_parser(cmd string, args []string) {
for i := 0; i < len(args); i++ {
fmt.Printf("[%d] Scanning '%s'", i, args[i])
file, err := os.Open(args[i])
if err != nil {
panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
}
parser := yaml_parser_t{}
yaml_parser_initialize(&parser)
yaml_parser_set_input_reader(&parser, file)
failed := false
token := yaml_token_t{}
count := 0
for {
if !yaml_parser_scan(&parser, &token) {
failed = true
break
}
if token.token_type == yaml_STREAM_END_TOKEN {
break
}
count++
}
file.Close()
msg := "SUCCESS"
if failed {
msg = "FAILED"
if parser.error != yaml_NO_ERROR {
m := parser.problem_mark
fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
parser.context, parser.problem, m.line, m.column)
}
}
fmt.Printf("%s (%d tokens)\n", msg, count)
}
}
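// For example, Run_parser("run-parser", []string{"doc.yaml"}) scans the
// (hypothetical) file doc.yaml and prints SUCCESS with the token count on a
// clean scan, or FAILED plus the parser error with its line and column. The
// cmd argument is currently unused.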

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,360 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"reflect"
"sort"
"strings"
"sync"
"unicode"
)
// A field represents a single field found in a struct.
type field struct {
name string
tag bool
index []int
typ reflect.Type
omitEmpty bool
flow bool
}
// byName sorts fields by name, breaking ties with depth,
// then breaking ties with "name came from yaml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that the YAML codec should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("yaml")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft,
opts.Contains("omitempty"), opts.Contains("flow")})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft})
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with yaml tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
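// For example, a struct field declared as
//	Name string `yaml:"name,omitempty"`
// is reported as a single field named "name" with omitEmpty set.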
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// yaml tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
// tagOptions is the string following a comma in a struct field's "yaml"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// stringValues is a slice of reflect.Value holding string-kinded values.
// It implements the methods to sort by string.
type stringValues []reflect.Value
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool {
av, ak := getElem(sv[i])
bv, bk := getElem(sv[j])
if ak == reflect.String && bk == reflect.String {
return av.String() < bv.String()
}
return ak < bk
}
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
k := v.Kind()
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
v = v.Elem()
k = v.Kind()
}
return v, k
}
// parseTag splits a struct field's yaml tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
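// For example, parseTag("name,omitempty,flow") returns "name" and the options
// "omitempty,flow"; parseTag("name") returns "name" and empty options.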
// Contains reports whether a comma-separated list of options
// contains the given option name. The name must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
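// For example, tagOptions("omitempty,flow").Contains("flow") is true, while
// tagOptions("omitempty,flow").Contains("fl") is false because only whole
// comma-separated options match.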

View File

@ -0,0 +1,128 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
/*
* Set the writer error and return 0.
*/
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
/*
* Flush the output buffer.
*/
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("Write handler must be set") /* Write handler must be set. */
}
if emitter.encoding == yaml_ANY_ENCODING {
panic("Encoding must be set") /* Output encoding must be set. */
}
/* Check if the buffer is empty. */
if emitter.buffer_pos == 0 {
return true
}
/* If the output encoding is UTF-8, we don't need to recode the buffer. */
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter,
emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
/* Recode the buffer into the raw buffer. */
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
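/*
 * For example, with yaml_UTF16LE_ENCODING the buffered UTF-8 sequence
 * 0xE2 0x82 0xAC (U+20AC) is recoded below to the two bytes 0xAC 0x20.
 */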
pos := 0
for pos < emitter.buffer_pos {
/*
* See the "reader.c" code for more details on UTF-8 encoding. Note
* that we assume that the buffer contains a valid UTF-8 sequence.
*/
/* Read the next UTF-8 character. */
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
/* Write the character. */
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
/* Write the character using a surrogate pair (check "reader.c"). */
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0x03)) /* only the top two bits of the low 10 bits belong in this byte */
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
/* Write the raw buffer. */
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}

View File

@ -0,0 +1,22 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
const (
yaml_VERSION_MAJOR = 0
yaml_VERSION_MINOR = 1
yaml_VERSION_PATCH = 6
yaml_VERSION_STRING = "0.1.6"
)

View File

@ -0,0 +1,891 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
const (
INPUT_RAW_BUFFER_SIZE = 1024
/*
* The size of the input buffer.
*
* It should be possible to decode the whole raw buffer.
*/
INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)
/*
* The size of the output buffer.
*/
OUTPUT_BUFFER_SIZE = 512
/*
* The size of the output raw buffer.
*
* It should be possible to encode the whole output buffer.
*/
OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)
INITIAL_STACK_SIZE = 16
INITIAL_QUEUE_SIZE = 16
)
func width(b byte) int {
if b&0x80 == 0 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
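// For example, width(0x41) == 1, width(0xC3) == 2, width(0xE2) == 3,
// width(0xF0) == 4, and width(0x80) == 0 (a lone trailing octet is not a
// valid leading octet).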
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
w := width(src[*src_pos])
switch w {
case 4:
dest[*dest_pos+3] = src[*src_pos+3]
fallthrough
case 3:
dest[*dest_pos+2] = src[*src_pos+2]
fallthrough
case 2:
dest[*dest_pos+1] = src[*src_pos+1]
fallthrough
case 1:
dest[*dest_pos] = src[*src_pos]
default:
panic("invalid width")
}
*dest_pos += w
*src_pos += w
}
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
func is_alpha(b byte) bool {
return (b >= '0' && b <= '9') ||
(b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z') ||
b == '_' || b == '-'
}
// /*
// * Check if the character at the specified position is a digit.
// */
//
func is_digit(b byte) bool {
return b >= '0' && b <= '9'
}
// /*
// * Get the value of a digit.
// */
//
func as_digit(b byte) int {
return int(b) - '0'
}
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
func is_hex(b byte) bool {
return (b >= '0' && b <= '9') ||
(b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
//
// /*
// * Get the value of a hex-digit.
// */
//
func as_hex(b byte) int {
if b >= 'A' && b <= 'F' {
return int(b) - 'A' + 10
} else if b >= 'a' && b <= 'f' {
return int(b) - 'a' + 10
}
return int(b) - '0'
}
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
func is_blankz_at(b []byte, i int) bool {
return is_blank(b[i]) || is_breakz_at(b, i)
}
// /*
// * Check if the character at the specified position is a line break.
// */
func is_break_at(b []byte, i int) bool {
return b[i] == '\r' || /* CR (#xD)*/
b[i] == '\n' || /* LF (#xA) */
(b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */
}
func is_breakz_at(b []byte, i int) bool {
return is_break_at(b, i) || is_z(b[i])
}
func is_crlf_at(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// /*
// * Check if the character at the specified position is NUL.
// */
func is_z(b byte) bool {
return b == 0x0
}
// /*
// * Check if the character at the specified position is space.
// */
func is_space(b byte) bool {
return b == ' '
}
//
// /*
// * Check if the character at the specified position is tab.
// */
func is_tab(b byte) bool {
return b == '\t'
}
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
func is_blank(b byte) bool {
return is_space(b) || is_tab(b)
}
// /*
// * Check if the character is ASCII.
// */
func is_ascii(b byte) bool {
return b <= '\x7f'
}
// /*
// * Check if the character can be printed unescaped.
// */
func is_printable_at(b []byte, i int) bool {
return ((b[i] == 0x0A) || /* . == #x0A */
(b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */
(b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && /* && . != #xFEFF */
!(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
// collapse the slice
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
// move the tokens down
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
// readjust the length
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// /*
// * Check if the character at the specified position is BOM.
// */
//
func is_bom_at(b []byte, i int) bool {
return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF
}
//
// #ifdef HAVE_CONFIG_H
// #include <config.h>
// #endif
//
// #include "./yaml.h"
//
// #include <assert.h>
// #include <limits.h>
//
// /*
// * Memory management.
// */
//
// yaml_DECLARE(void *)
// yaml_malloc(size_t size);
//
// yaml_DECLARE(void *)
// yaml_realloc(void *ptr, size_t size);
//
// yaml_DECLARE(void)
// yaml_free(void *ptr);
//
// yaml_DECLARE(yaml_char_t *)
// yaml_strdup(const yaml_char_t *);
//
// /*
// * Reader: Ensure that the buffer contains at least `length` characters.
// */
//
// yaml_DECLARE(int)
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
//
// /*
// * Scanner: Ensure that the token stack contains at least one token ready.
// */
//
// yaml_DECLARE(int)
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
//
// /*
// * The size of the input raw buffer.
// */
//
// #define INPUT_RAW_BUFFER_SIZE 16384
//
// /*
// * The size of the input buffer.
// *
// * It should be possible to decode the whole raw buffer.
// */
//
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
//
// /*
// * The size of the output buffer.
// */
//
// #define OUTPUT_BUFFER_SIZE 16384
//
// /*
// * The size of the output raw buffer.
// *
// * It should be possible to encode the whole output buffer.
// */
//
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
//
// /*
// * The size of other stacks and queues.
// */
//
// #define INITIAL_STACK_SIZE 16
// #define INITIAL_QUEUE_SIZE 16
// #define INITIAL_STRING_SIZE 16
//
// /*
// * Buffer management.
// */
//
// #define BUFFER_INIT(context,buffer,size) \
// (((buffer).start = yaml_malloc(size)) ? \
// ((buffer).last = (buffer).pointer = (buffer).start, \
// (buffer).end = (buffer).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define BUFFER_DEL(context,buffer) \
// (yaml_free((buffer).start), \
// (buffer).start = (buffer).pointer = (buffer).end = 0)
//
// /*
// * String management.
// */
//
// typedef struct {
// yaml_char_t *start;
// yaml_char_t *end;
// yaml_char_t *pointer;
// } yaml_string_t;
//
// yaml_DECLARE(int)
// yaml_string_extend(yaml_char_t **start,
// yaml_char_t **pointer, yaml_char_t **end);
//
// yaml_DECLARE(int)
// yaml_string_join(
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
//
// #define NULL_STRING { NULL, NULL, NULL }
//
// #define STRING(string,length) { (string), (string)+(length), (string) }
//
// #define STRING_ASSIGN(value,string,length) \
// ((value).start = (string), \
// (value).end = (string)+(length), \
// (value).pointer = (string))
//
// #define STRING_INIT(context,string,size) \
// (((string).start = yaml_malloc(size)) ? \
// ((string).pointer = (string).start, \
// (string).end = (string).start+(size), \
// memset((string).start, 0, (size)), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STRING_DEL(context,string) \
// (yaml_free((string).start), \
// (string).start = (string).pointer = (string).end = 0)
//
// #define STRING_EXTEND(context,string) \
// (((string).pointer+5 < (string).end) \
// || yaml_string_extend(&(string).start, \
// &(string).pointer, &(string).end))
//
// #define CLEAR(context,string) \
// ((string).pointer = (string).start, \
// memset((string).start, 0, (string).end-(string).start))
//
// #define JOIN(context,string_a,string_b) \
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
// &(string_a).end, &(string_b).start, \
// &(string_b).pointer, &(string_b).end)) ? \
// ((string_b).pointer = (string_b).start, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * String check operations.
// */
//
// /*
// * Check the octet at the specified position.
// */
//
// #define CHECK_AT(string,octet,offset) \
// ((string).pointer[offset] == (yaml_char_t)(octet))
//
// /*
// * Check the current octet in the buffer.
// */
//
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
//
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
//
// #define IS_ALPHA_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
// (string).pointer[offset] == '_' || \
// (string).pointer[offset] == '-')
//
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
//
// /*
// * Check if the character at the specified position is a digit.
// */
//
// #define IS_DIGIT_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9'))
//
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
//
// /*
// * Get the value of a digit.
// */
//
// #define AS_DIGIT_AT(string,offset) \
// ((string).pointer[offset] - (yaml_char_t) '0')
//
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
//
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
// #define IS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f'))
//
// #define IS_HEX(string) IS_HEX_AT((string),0)
//
// /*
// * Get the value of a hex-digit.
// */
//
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
//
// #define AS_HEX(string) AS_HEX_AT((string),0)
//
// /*
// * Check if the character is ASCII.
// */
//
// #define IS_ASCII_AT(string,offset) \
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
//
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
//
// /*
// * Check if the character can be printed unescaped.
// */
//
// #define IS_PRINTABLE_AT(string,offset) \
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
// && (string).pointer[offset] <= 0x7E) \
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
// && (string).pointer[offset+1] >= 0xA0) \
// || ((string).pointer[offset] > 0xC2 \
// && (string).pointer[offset] < 0xED) \
// || ((string).pointer[offset] == 0xED \
// && (string).pointer[offset+1] < 0xA0) \
// || ((string).pointer[offset] == 0xEE) \
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
// && (string).pointer[offset+2] == 0xBF) \
// && !((string).pointer[offset+1] == 0xBF \
// && ((string).pointer[offset+2] == 0xBE \
// || (string).pointer[offset+2] == 0xBF))))
//
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
//
// /*
// * Check if the character at the specified position is NUL.
// */
//
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
//
// #define IS_Z(string) IS_Z_AT((string),0)
//
// /*
// * Check if the character at the specified position is BOM.
// */
//
// #define IS_BOM_AT(string,offset) \
// (CHECK_AT((string),'\xEF',(offset)) \
// && CHECK_AT((string),'\xBB',(offset)+1) \
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
//
// #define IS_BOM(string) IS_BOM_AT(string,0)
//
// /*
// * Check if the character at the specified position is space.
// */
//
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
//
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
//
// /*
// * Check if the character at the specified position is tab.
// */
//
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
//
// #define IS_TAB(string) IS_TAB_AT((string),0)
//
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
//
// #define IS_BLANK_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
//
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
//
// /*
// * Check if the character at the specified position is a line break.
// */
//
// #define IS_BREAK_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
// || (CHECK_AT((string),'\xC2',(offset)) \
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
//
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
//
// #define IS_CRLF_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
//
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
//
// /*
// * Check if the character is a line break or NUL.
// */
//
// #define IS_BREAKZ_AT(string,offset) \
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
//
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, or NUL.
// */
//
// #define IS_SPACEZ_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
//
// #define IS_BLANKZ_AT(string,offset) \
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
//
// /*
// * Determine the width of the character.
// */
//
// #define WIDTH_AT(string,offset) \
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
//
// #define WIDTH(string) WIDTH_AT((string),0)
//
// /*
// * Move the string pointer to the next character.
// */
//
// #define MOVE(string) ((string).pointer += WIDTH((string)))
//
// /*
// * Copy a character and move the pointers of both strings.
// */
//
// #define COPY(string_a,string_b) \
// ((*(string_b).pointer & 0x80) == 0x00 ? \
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
//
// /*
// * Stack and queue management.
// */
//
// yaml_DECLARE(int)
// yaml_stack_extend(void **start, void **top, void **end);
//
// yaml_DECLARE(int)
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
//
// #define STACK_INIT(context,stack,size) \
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
// ((stack).top = (stack).start, \
// (stack).end = (stack).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STACK_DEL(context,stack) \
// (yaml_free((stack).start), \
// (stack).start = (stack).top = (stack).end = 0)
//
// #define STACK_EMPTY(context,stack) \
// ((stack).start == (stack).top)
//
// #define PUSH(context,stack,value) \
// (((stack).top != (stack).end \
// || yaml_stack_extend((void **)&(stack).start, \
// (void **)&(stack).top, (void **)&(stack).end)) ? \
// (*((stack).top++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define POP(context,stack) \
// (*(--(stack).top))
//
// #define QUEUE_INIT(context,queue,size) \
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
// ((queue).head = (queue).tail = (queue).start, \
// (queue).end = (queue).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define QUEUE_DEL(context,queue) \
// (yaml_free((queue).start), \
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
//
// #define QUEUE_EMPTY(context,queue) \
// ((queue).head == (queue).tail)
//
// #define ENQUEUE(context,queue,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (*((queue).tail++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define DEQUEUE(context,queue) \
// (*((queue).head++))
//
// #define QUEUE_INSERT(context,queue,index,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (memmove((queue).head+(index)+1,(queue).head+(index), \
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
// *((queue).head+(index)) = value, \
// (queue).tail++, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * Token initializers.
// */
//
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
// (memset(&(token), 0, sizeof(yaml_token_t)), \
// (token).type = (token_type), \
// (token).start_mark = (token_start_mark), \
// (token).end_mark = (token_end_mark))
//
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
// (token).data.stream_start.encoding = (token_encoding))
//
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
//
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
// (token).data.alias.value = (token_value))
//
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
// (token).data.anchor.value = (token_value))
//
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag.handle = (token_handle), \
// (token).data.tag.suffix = (token_suffix))
//
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
// (token).data.scalar.value = (token_value), \
// (token).data.scalar.length = (token_length), \
// (token).data.scalar.style = (token_style))
//
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.version_directive.major = (token_major), \
// (token).data.version_directive.minor = (token_minor))
//
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag_directive.handle = (token_handle), \
// (token).data.tag_directive.prefix = (token_prefix))
//
// /*
// * Event initializers.
// */
//
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
// (memset(&(event), 0, sizeof(yaml_event_t)), \
// (event).type = (event_type), \
// (event).start_mark = (event_start_mark), \
// (event).end_mark = (event_end_mark))
//
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
// (event).data.stream_start.encoding = (event_encoding))
//
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
//
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
// (event).data.document_start.version_directive = (event_version_directive), \
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
// (event).data.document_start.implicit = (event_implicit))
//
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
// (event).data.document_end.implicit = (event_implicit))
//
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
// (event).data.alias.anchor = (event_anchor))
//
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
// (event).data.scalar.anchor = (event_anchor), \
// (event).data.scalar.tag = (event_tag), \
// (event).data.scalar.value = (event_value), \
// (event).data.scalar.length = (event_length), \
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
// (event).data.scalar.style = (event_style))
//
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
// (event).data.sequence_start.anchor = (event_anchor), \
// (event).data.sequence_start.tag = (event_tag), \
// (event).data.sequence_start.implicit = (event_implicit), \
// (event).data.sequence_start.style = (event_style))
//
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
//
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
// (event).data.mapping_start.anchor = (event_anchor), \
// (event).data.mapping_start.tag = (event_tag), \
// (event).data.mapping_start.implicit = (event_implicit), \
// (event).data.mapping_start.style = (event_style))
//
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
//
// /*
// * Document initializer.
// */
//
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
// document_version_directive,document_tag_directives_start, \
// document_tag_directives_end,document_start_implicit, \
// document_end_implicit,document_start_mark,document_end_mark) \
// (memset(&(document), 0, sizeof(yaml_document_t)), \
// (document).nodes.start = (document_nodes_start), \
// (document).nodes.end = (document_nodes_end), \
// (document).nodes.top = (document_nodes_start), \
// (document).version_directive = (document_version_directive), \
// (document).tag_directives.start = (document_tag_directives_start), \
// (document).tag_directives.end = (document_tag_directives_end), \
// (document).start_implicit = (document_start_implicit), \
// (document).end_implicit = (document_end_implicit), \
// (document).start_mark = (document_start_mark), \
// (document).end_mark = (document_end_mark))
//
// /*
// * Node initializers.
// */
//
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
// (memset(&(node), 0, sizeof(yaml_node_t)), \
// (node).type = (node_type), \
// (node).tag = (node_tag), \
// (node).start_mark = (node_start_mark), \
// (node).end_mark = (node_end_mark))
//
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.scalar.value = (node_value), \
// (node).data.scalar.length = (node_length), \
// (node).data.scalar.style = (node_style))
//
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.sequence.items.start = (node_items_start), \
// (node).data.sequence.items.end = (node_items_end), \
// (node).data.sequence.items.top = (node_items_start), \
// (node).data.sequence.style = (node_style))
//
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.mapping.pairs.start = (node_pairs_start), \
// (node).data.mapping.pairs.end = (node_pairs_end), \
// (node).data.mapping.pairs.top = (node_pairs_start), \
// (node).data.mapping.style = (node_style))
//

View File

@ -0,0 +1,953 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"io"
)
/** The version directive data. */
type yaml_version_directive_t struct {
major int // The major version number
minor int // The minor version number
}
/** The tag directive data. */
type yaml_tag_directive_t struct {
handle []byte // The tag handle
prefix []byte // The tag prefix
}
/** The stream encoding. */
type yaml_encoding_t int
const (
/** Let the parser choose the encoding. */
yaml_ANY_ENCODING yaml_encoding_t = iota
/** The default UTF-8 encoding. */
yaml_UTF8_ENCODING
/** The UTF-16-LE encoding with BOM. */
yaml_UTF16LE_ENCODING
/** The UTF-16-BE encoding with BOM. */
yaml_UTF16BE_ENCODING
)
/** Line break types. */
type yaml_break_t int
const (
yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
)
/** Many bad things could happen with the parser and emitter. */
type YAML_error_type_t int
const (
/** No error is produced. */
yaml_NO_ERROR YAML_error_type_t = iota
/** Cannot allocate or reallocate a block of memory. */
yaml_MEMORY_ERROR
/** Cannot read or decode the input stream. */
yaml_READER_ERROR
/** Cannot scan the input stream. */
yaml_SCANNER_ERROR
/** Cannot parse the input stream. */
yaml_PARSER_ERROR
/** Cannot compose a YAML document. */
yaml_COMPOSER_ERROR
/** Cannot write to the output stream. */
yaml_WRITER_ERROR
/** Cannot emit a YAML stream. */
yaml_EMITTER_ERROR
)
/** The pointer position. */
type YAML_mark_t struct {
/** The position index. */
index int
/** The position line. */
line int
/** The position column. */
column int
}
func (m YAML_mark_t) String() string {
return fmt.Sprintf("line %d, column %d", m.line, m.column)
}
/** @} */
/**
* @defgroup styles Node Styles
* @{
*/
type yaml_style_t int
/** Scalar styles. */
type yaml_scalar_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
/** The plain scalar style. */
yaml_PLAIN_SCALAR_STYLE
/** The single-quoted scalar style. */
yaml_SINGLE_QUOTED_SCALAR_STYLE
/** The double-quoted scalar style. */
yaml_DOUBLE_QUOTED_SCALAR_STYLE
/** The literal scalar style. */
yaml_LITERAL_SCALAR_STYLE
/** The folded scalar style. */
yaml_FOLDED_SCALAR_STYLE
)
/** Sequence styles. */
type yaml_sequence_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
/** The block sequence style. */
yaml_BLOCK_SEQUENCE_STYLE
/** The flow sequence style. */
yaml_FLOW_SEQUENCE_STYLE
)
/** Mapping styles. */
type yaml_mapping_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
/** The block mapping style. */
yaml_BLOCK_MAPPING_STYLE
/** The flow mapping style. */
yaml_FLOW_MAPPING_STYLE
/* yaml_FLOW_SET_MAPPING_STYLE */
)
/** @} */
/**
* @defgroup tokens Tokens
* @{
*/
/** Token types. */
type yaml_token_type_t int
const (
/** An empty token. */
yaml_NO_TOKEN yaml_token_type_t = iota
/** A STREAM-START token. */
yaml_STREAM_START_TOKEN
/** A STREAM-END token. */
yaml_STREAM_END_TOKEN
/** A VERSION-DIRECTIVE token. */
yaml_VERSION_DIRECTIVE_TOKEN
/** A TAG-DIRECTIVE token. */
yaml_TAG_DIRECTIVE_TOKEN
/** A DOCUMENT-START token. */
yaml_DOCUMENT_START_TOKEN
/** A DOCUMENT-END token. */
yaml_DOCUMENT_END_TOKEN
/** A BLOCK-SEQUENCE-START token. */
yaml_BLOCK_SEQUENCE_START_TOKEN
/** A BLOCK-MAPPING-START token. */
yaml_BLOCK_MAPPING_START_TOKEN
/** A BLOCK-END token. */
yaml_BLOCK_END_TOKEN
/** A FLOW-SEQUENCE-START token. */
yaml_FLOW_SEQUENCE_START_TOKEN
/** A FLOW-SEQUENCE-END token. */
yaml_FLOW_SEQUENCE_END_TOKEN
/** A FLOW-MAPPING-START token. */
yaml_FLOW_MAPPING_START_TOKEN
/** A FLOW-MAPPING-END token. */
yaml_FLOW_MAPPING_END_TOKEN
/** A BLOCK-ENTRY token. */
yaml_BLOCK_ENTRY_TOKEN
/** A FLOW-ENTRY token. */
yaml_FLOW_ENTRY_TOKEN
/** A KEY token. */
yaml_KEY_TOKEN
/** A VALUE token. */
yaml_VALUE_TOKEN
/** An ALIAS token. */
yaml_ALIAS_TOKEN
/** An ANCHOR token. */
yaml_ANCHOR_TOKEN
/** A TAG token. */
yaml_TAG_TOKEN
/** A SCALAR token. */
yaml_SCALAR_TOKEN
)
/** The token structure. */
type yaml_token_t struct {
/** The token type. */
token_type yaml_token_type_t
/** The token data. */
/** The stream start (for @c yaml_STREAM_START_TOKEN). */
encoding yaml_encoding_t
/** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN). */
/** The anchor (for @c ). */
/** The scalar value (for @c ). */
value []byte
/** The tag suffix. */
suffix []byte
/** The scalar value (for @c yaml_SCALAR_TOKEN). */
/** The scalar style. */
style yaml_scalar_style_t
/** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
version_directive yaml_version_directive_t
/** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
prefix []byte
/** The beginning of the token. */
start_mark YAML_mark_t
/** The end of the token. */
end_mark YAML_mark_t
major, minor int
}
/**
* @defgroup events Events
* @{
*/
/** Event types. */
type yaml_event_type_t int
const (
/** An empty event. */
yaml_NO_EVENT yaml_event_type_t = iota
/** A STREAM-START event. */
yaml_STREAM_START_EVENT
/** A STREAM-END event. */
yaml_STREAM_END_EVENT
/** A DOCUMENT-START event. */
yaml_DOCUMENT_START_EVENT
/** A DOCUMENT-END event. */
yaml_DOCUMENT_END_EVENT
/** An ALIAS event. */
yaml_ALIAS_EVENT
/** A SCALAR event. */
yaml_SCALAR_EVENT
/** A SEQUENCE-START event. */
yaml_SEQUENCE_START_EVENT
/** A SEQUENCE-END event. */
yaml_SEQUENCE_END_EVENT
/** A MAPPING-START event. */
yaml_MAPPING_START_EVENT
/** A MAPPING-END event. */
yaml_MAPPING_END_EVENT
)
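// Illustrative note (not part of the original vendored file): for a small
// document such as `name: value`, a parser emits the event sequence
// STREAM-START, DOCUMENT-START, MAPPING-START, SCALAR("name"),
// SCALAR("value"), MAPPING-END, DOCUMENT-END, STREAM-END; sequences nest a
// SEQUENCE-START/SEQUENCE-END pair in the same way.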
/** The event structure. */
type yaml_event_t struct {
/** The event type. */
event_type yaml_event_type_t
/** The stream parameters (for @c yaml_STREAM_START_EVENT). */
encoding yaml_encoding_t
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
version_directive *yaml_version_directive_t
/** The beginning and end of the tag directives list. */
tag_directives []yaml_tag_directive_t
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** Is the document indicator implicit? */
implicit bool
/** The alias parameters (for @c yaml_ALIAS_EVENT, yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The anchor. */
anchor []byte
/** The scalar parameters (for @c yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The tag. */
tag []byte
/** The scalar value. */
value []byte
/** Is the tag optional for the plain style? */
plain_implicit bool
/** Is the tag optional for any non-plain style? */
quoted_implicit bool
/** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The sequence style. */
/** The scalar style. */
style yaml_style_t
/** The beginning of the event. */
start_mark, end_mark YAML_mark_t
}
/**
* @defgroup nodes Nodes
* @{
*/
const (
/** The tag @c !!null with the only possible value: @c null. */
yaml_NULL_TAG = "tag:yaml.org,2002:null"
/** The tag @c !!bool with the values: @c true and @c false. */
yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
/** The tag @c !!str for string values. */
yaml_STR_TAG = "tag:yaml.org,2002:str"
/** The tag @c !!int for integer values. */
yaml_INT_TAG = "tag:yaml.org,2002:int"
/** The tag @c !!float for float values. */
yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
/** The tag @c !!timestamp for date and time values. */
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
/** The tag @c !!seq is used to denote sequences. */
yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
/** The tag @c !!map is used to denote mappings. */
yaml_MAP_TAG = "tag:yaml.org,2002:map"
/** The default scalar tag is @c !!str. */
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
/** The default sequence tag is @c !!seq. */
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
/** The default mapping tag is @c !!map. */
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
)
/** Node types. */
type yaml_node_type_t int
const (
/** An empty node. */
yaml_NO_NODE yaml_node_type_t = iota
/** A scalar node. */
yaml_SCALAR_NODE
/** A sequence node. */
yaml_SEQUENCE_NODE
/** A mapping node. */
yaml_MAPPING_NODE
)
/** An element of a sequence node. */
type yaml_node_item_t int
/** An element of a mapping node. */
type yaml_node_pair_t struct {
/** The key of the element. */
key int
/** The value of the element. */
value int
}
/** The node structure. */
type yaml_node_t struct {
/** The node type. */
node_type yaml_node_type_t
/** The node tag. */
tag []byte
/** The scalar parameters (for @c yaml_SCALAR_NODE). */
scalar struct {
/** The scalar value. */
value []byte
/** The scalar style. */
style yaml_scalar_style_t
}
/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
sequence struct {
/** The stack of sequence items. */
items []yaml_node_item_t
/** The sequence style. */
style yaml_sequence_style_t
}
/** The mapping parameters (for @c yaml_MAPPING_NODE). */
mapping struct {
/** The stack of mapping pairs (key, value). */
pairs []yaml_node_pair_t
/** The mapping style. */
style yaml_mapping_style_t
}
/** The beginning of the node. */
start_mark YAML_mark_t
/** The end of the node. */
end_mark YAML_mark_t
}
/** The document structure. */
type yaml_document_t struct {
/** The document nodes. */
nodes []yaml_node_t
/** The version directive. */
version_directive *yaml_version_directive_t
/** The list of tag directives. */
tags []yaml_tag_directive_t
/** Is the document start indicator implicit? */
start_implicit bool
/** Is the document end indicator implicit? */
end_implicit bool
/** The beginning of the document. */
start_mark YAML_mark_t
/** The end of the document. */
end_mark YAML_mark_t
}
/**
* The prototype of a read handler.
*
* The read handler is called when the parser needs to read more bytes from the
* source. The handler should write no more than @a size bytes to the @a
* buffer. The number of bytes written should be set to the @a size_read variable.
*
* @param[in,out] data A pointer to an application data specified by
* yaml_parser_set_input().
* @param[out] buffer The buffer to write the data from the source.
* @param[in] size The size of the buffer.
* @param[out] size_read The actual number of bytes read from the source.
*
* @returns On success, the handler should return @c 1. If the handler failed,
* the returned value should be @c 0. On EOF, the handler should set the
* @a size_read to @c 0 and return @c 1.
*/
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
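// A minimal sketch (added for illustration, not part of the vendored file) of
// a read handler that pulls bytes from the parser's io.Reader input; a handler
// of this shape could be assigned to the parser's read_handler field.
// Returning (0, io.EOF) signals the end of the input stream.
//
//	func readerHandler(parser *yaml_parser_t, buffer []byte) (int, error) {
//		return parser.input_reader.Read(buffer)
//	}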
/**
* This structure holds information about a potential simple key.
*/
type yaml_simple_key_t struct {
/** Is a simple key possible? */
possible bool
/** Is a simple key required? */
required bool
/** The number of the token. */
token_number int
/** The position mark. */
mark YAML_mark_t
}
/**
* The states of the parser.
*/
type yaml_parser_state_t int
const (
/** Expect STREAM-START. */
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
/** Expect the beginning of an implicit document. */
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
/** Expect DOCUMENT-START. */
yaml_PARSE_DOCUMENT_START_STATE
/** Expect the content of a document. */
yaml_PARSE_DOCUMENT_CONTENT_STATE
/** Expect DOCUMENT-END. */
yaml_PARSE_DOCUMENT_END_STATE
/** Expect a block node. */
yaml_PARSE_BLOCK_NODE_STATE
/** Expect a block node or indentless sequence. */
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
/** Expect a flow node. */
yaml_PARSE_FLOW_NODE_STATE
/** Expect the first entry of a block sequence. */
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
/** Expect an entry of a block sequence. */
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
/** Expect an entry of an indentless sequence. */
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
/** Expect the first key of a block mapping. */
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
/** Expect a block mapping key. */
yaml_PARSE_BLOCK_MAPPING_KEY_STATE
/** Expect a block mapping value. */
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
/** Expect the first entry of a flow sequence. */
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
/** Expect an entry of a flow sequence. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
/** Expect a key of an ordered mapping. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
/** Expect a value of an ordered mapping. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
/** Expect the end of an ordered mapping entry. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
/** Expect the first key of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
/** Expect a key of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_KEY_STATE
/** Expect a value of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_VALUE_STATE
/** Expect an empty value of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
/** Expect nothing. */
yaml_PARSE_END_STATE
)
/**
* This structure holds aliases data.
*/
type yaml_alias_data_t struct {
/** The anchor. */
anchor []byte
/** The node id. */
index int
/** The anchor mark. */
mark YAML_mark_t
}
/**
* The parser structure.
*
* All members are internal. Manage the structure using the @c yaml_parser_
* family of functions.
*/
type yaml_parser_t struct {
/**
* @name Error handling
* @{
*/
/** Error type. */
error YAML_error_type_t
/** Error description. */
problem string
/** The byte about which the problem occurred. */
problem_offset int
/** The problematic value (@c -1 is none). */
problem_value int
/** The problem position. */
problem_mark YAML_mark_t
/** The error context. */
context string
/** The context position. */
context_mark YAML_mark_t
/**
* @}
*/
/**
* @name Reader stuff
* @{
*/
/** Read handler. */
read_handler yaml_read_handler_t
/** Reader input data. */
input_reader io.Reader
input []byte
input_pos int
/** EOF flag */
eof bool
/** The working buffer. */
buffer []byte
buffer_pos int
/* The number of unread characters in the buffer. */
unread int
/** The raw buffer. */
raw_buffer []byte
raw_buffer_pos int
/** The input encoding. */
encoding yaml_encoding_t
/** The offset of the current position (in bytes). */
offset int
/** The mark of the current position. */
mark YAML_mark_t
/**
* @}
*/
/**
* @name Scanner stuff
* @{
*/
/** Have we started to scan the input stream? */
stream_start_produced bool
/** Have we reached the end of the input stream? */
stream_end_produced bool
/** The number of unclosed '[' and '{' indicators. */
flow_level int
/** The tokens queue. */
tokens []yaml_token_t
tokens_head int
/** The number of tokens fetched from the queue. */
tokens_parsed int
/* Does the tokens queue contain a token ready for dequeueing? */
token_available bool
/** The indentation levels stack. */
indents []int
/** The current indentation level. */
indent int
/** May a simple key occur at the current position? */
simple_key_allowed bool
/** The stack of simple keys. */
simple_keys []yaml_simple_key_t
/**
* @}
*/
/**
* @name Parser stuff
* @{
*/
/** The parser states stack. */
states []yaml_parser_state_t
/** The current parser state. */
state yaml_parser_state_t
/** The stack of marks. */
marks []YAML_mark_t
/** The list of TAG directives. */
tag_directives []yaml_tag_directive_t
/**
* @}
*/
/**
* @name Dumper stuff
* @{
*/
/** The alias data. */
aliases []yaml_alias_data_t
/** The currently parsed document. */
document *yaml_document_t
/**
* @}
*/
}
/**
* The prototype of a write handler.
*
* The write handler is called when the emitter needs to flush the accumulated
* characters to the output. The handler should write @a size bytes of the
* @a buffer to the output.
*
* @param[in,out] data A pointer to an application data specified by
* yaml_emitter_set_output().
* @param[in] buffer The buffer with bytes to be written.
* @param[in] size The size of the buffer.
*
* @returns On success, the handler should return @c 1. If the handler failed,
* the returned value should be @c 0.
*/
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
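// A minimal sketch (added for illustration, not part of the vendored file) of
// a write handler that forwards the buffered bytes to the emitter's io.Writer
// output; a handler of this shape could be assigned to the emitter's
// write_handler field.
//
//	func writerHandler(emitter *yaml_emitter_t, buffer []byte) error {
//		_, err := emitter.output_writer.Write(buffer)
//		return err
//	}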
/** The emitter states. */
type yaml_emitter_state_t int
const (
/** Expect STREAM-START. */
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
/** Expect the first DOCUMENT-START or STREAM-END. */
yaml_EMIT_FIRST_DOCUMENT_START_STATE
/** Expect DOCUMENT-START or STREAM-END. */
yaml_EMIT_DOCUMENT_START_STATE
/** Expect the content of a document. */
yaml_EMIT_DOCUMENT_CONTENT_STATE
/** Expect DOCUMENT-END. */
yaml_EMIT_DOCUMENT_END_STATE
/** Expect the first item of a flow sequence. */
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
/** Expect an item of a flow sequence. */
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
/** Expect the first key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
/** Expect a key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_KEY_STATE
/** Expect a value for a simple key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
/** Expect a value of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_VALUE_STATE
/** Expect the first item of a block sequence. */
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
/** Expect an item of a block sequence. */
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
/** Expect the first key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
/** Expect the key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_KEY_STATE
/** Expect a value for a simple key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
/** Expect a value of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
/** Expect nothing. */
yaml_EMIT_END_STATE
)
/**
* The emitter structure.
*
* All members are internal. Manage the structure using the @c yaml_emitter_
* family of functions.
*/
type yaml_emitter_t struct {
/**
* @name Error handling
* @{
*/
/** Error type. */
error YAML_error_type_t
/** Error description. */
problem string
/**
* @}
*/
/**
* @name Writer stuff
* @{
*/
/** Write handler. */
write_handler yaml_write_handler_t
/** Standard (string or file) output data. */
output_buffer *[]byte
output_writer io.Writer
/** The working buffer. */
buffer []byte
buffer_pos int
/** The raw buffer. */
raw_buffer []byte
raw_buffer_pos int
/** The stream encoding. */
encoding yaml_encoding_t
/**
* @}
*/
/**
* @name Emitter stuff
* @{
*/
/** Is the output in the canonical style? */
canonical bool
/** The number of indentation spaces. */
best_indent int
/** The preferred width of the output lines. */
best_width int
/** Allow unescaped non-ASCII characters? */
unicode bool
/** The preferred line break. */
line_break yaml_break_t
/** The stack of states. */
states []yaml_emitter_state_t
/** The current emitter state. */
state yaml_emitter_state_t
/** The event queue. */
events []yaml_event_t
events_head int
/** The stack of indentation levels. */
indents []int
/** The list of tag directives. */
tag_directives []yaml_tag_directive_t
/** The current indentation level. */
indent int
/** The current flow level. */
flow_level int
/** Is it the document root context? */
root_context bool
/** Is it a sequence context? */
sequence_context bool
/** Is it a mapping context? */
mapping_context bool
/** Is it a simple mapping key context? */
simple_key_context bool
/** The current line. */
line int
/** The current column. */
column int
/** Was the last character a whitespace? */
whitespace bool
/** Was the last character an indentation character (' ', '-', '?', ':')? */
indention bool
/** Is an explicit document end required? */
open_ended bool
/** Anchor analysis. */
anchor_data struct {
/** The anchor value. */
anchor []byte
/** Is it an alias? */
alias bool
}
/** Tag analysis. */
tag_data struct {
/** The tag handle. */
handle []byte
/** The tag suffix. */
suffix []byte
}
/** Scalar analysis. */
scalar_data struct {
/** The scalar value. */
value []byte
/** Does the scalar contain line breaks? */
multiline bool
/** Can the scalar be expressed in the flow plain style? */
flow_plain_allowed bool
/** Can the scalar be expressed in the block plain style? */
block_plain_allowed bool
/** Can the scalar be expressed in the single quoted style? */
single_quoted_allowed bool
/** Can the scalar be expressed in the literal or folded styles? */
block_allowed bool
/** The output style. */
style yaml_scalar_style_t
}
/**
* @}
*/
/**
* @name Dumper stuff
* @{
*/
/** Has the stream already been opened? */
opened bool
/** Has the stream already been closed? */
closed bool
/** The information associated with the document nodes. */
anchors *struct {
/** The number of references. */
references int
/** The anchor id. */
anchor int
/** Has the node been emitted? */
serialized bool
}
/** The last assigned anchor id. */
last_anchor_id int
/** The currently emitted document. */
document *yaml_document_t
/**
* @}
*/
}

View File

@ -138,7 +138,7 @@ type Canonical interface {
func SplitHostname(named Named) (string, string) {
name := named.Name()
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
if len(match) != 3 {
return "", name
}
return match[1], match[2]
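// Illustrative example (added here, not part of the diff): with these regexps,
// SplitHostname("example.com:5000/team/app") is expected to yield
// ("example.com:5000", "team/app"), while a bare name such as "ubuntu", which
// has no registry component, yields ("", "ubuntu").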

BIN
vendor/github.com/go-openapi/spec/debug.test generated vendored Executable file

Binary file not shown.

View File

@ -181,6 +181,7 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
if startingRef == nil {
return nil
}
if ptr == nil {
return startingRef
}
@ -215,6 +216,7 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
}
}
return ret
}
@ -226,6 +228,7 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
}
oldRef := currentRef
if currentRef != nil {
var err error
currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
@ -233,6 +236,7 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
return err
}
}
if currentRef == nil {
currentRef = ref
}
@ -289,7 +293,27 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
if currentRef.String() != "" {
res, _, err = currentRef.GetPointer().Get(data)
if err != nil {
return err
if strings.HasPrefix(ref.String(), "#") {
// go back to original spec
newUrl := r.loadingRef.GetURL().String()
refURL, err = url.Parse(newUrl + ref.String())
if err != nil {
return err
}
}
data, _, _, err = r.load(refURL)
if err != nil {
return err
}
res, _, err = ref.GetPointer().Get(data)
if err != nil {
return err
}
}
} else {
res = data
@ -400,7 +424,6 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error
rid, _ := NewRef(root.(*Swagger).ID)
rrr, _ = rid.Inherits(nrr)
}
}
resolver, err := defaultSchemaLoader(root, rrr, cache)
@ -425,7 +448,15 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
if target.Items.Schema != nil {
t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
if err != nil {
return nil, err
if target.Items.Schema.ID == "" {
target.Items.Schema.ID = target.ID
if err != nil {
t, err = expandSchema(*target.Items.Schema, parentRefs, resolver)
if err != nil {
return nil, err
}
}
}
}
*target.Items.Schema = *t
}
@ -451,6 +482,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
// t is the new expanded schema
var t *Schema
for target.Ref.String() != "" {
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
return &target, nil
@ -459,6 +491,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
if err := resolver.Resolve(&target.Ref, &t); err != nil {
return &target, err
}
parentRefs = append(parentRefs, target.Ref.String())
target = *t
}

View File

@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
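// Worked example (added for illustration, not part of the generated diff):
// the varint bytes 0xAC 0x02 decode to 300, since 0xAC&0x7F = 44 and
// 0x02<<7 = 256, so x = 44 + 256 = 300 and two bytes are consumed; the
// package-level DecodeVarint would report n = 2 for the same input.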
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {

52
vendor/golang.org/x/text/internal/gen.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"log"
"golang.org/x/text/internal/gen"
"golang.org/x/text/language"
"golang.org/x/text/unicode/cldr"
)
func main() {
r := gen.OpenCLDRCoreZip()
defer r.Close()
d := &cldr.Decoder{}
data, err := d.DecodeZip(r)
if err != nil {
log.Fatalf("DecodeZip: %v", err)
}
w := gen.NewCodeWriter()
defer w.WriteGoFile("tables.go", "internal")
// Create parents table.
parents := make([]uint16, language.NumCompactTags)
for _, loc := range data.Locales() {
tag := language.MustParse(loc)
index, ok := language.CompactIndex(tag)
if !ok {
continue
}
parentIndex := 0 // und
for p := tag.Parent(); p != language.Und; p = p.Parent() {
if x, ok := language.CompactIndex(p); ok {
parentIndex = x
break
}
}
parents[index] = uint16(parentIndex)
}
w.WriteComment(`
Parent maps a compact index of a tag to the compact index of the parent of
this tag.`)
w.WriteVar("Parent", parents)
}

51
vendor/golang.org/x/text/internal/internal.go generated vendored Normal file
View File

@ -0,0 +1,51 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package internal contains non-exported functionality that are used by
// packages in the text repository.
package internal
import (
"sort"
"golang.org/x/text/language"
)
// SortTags sorts tags in place.
func SortTags(tags []language.Tag) {
sort.Sort(sorter(tags))
}
type sorter []language.Tag
func (s sorter) Len() int {
return len(s)
}
func (s sorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sorter) Less(i, j int) bool {
return s[i].String() < s[j].String()
}
// UniqueTags sorts and filters duplicate tags in place and returns a slice with
// only unique tags.
func UniqueTags(tags []language.Tag) []language.Tag {
if len(tags) <= 1 {
return tags
}
SortTags(tags)
k := 0
for i := 1; i < len(tags); i++ {
if tags[k].String() < tags[i].String() {
k++
tags[k] = tags[i]
}
}
return tags[:k+1]
}
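// Illustrative usage (added here, not part of the vendored file): UniqueTags
// sorts by the tags' String() form and drops adjacent duplicates, so
// []language.Tag{language.English, language.German, language.English}
// becomes [de en] (German, then English).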

67
vendor/golang.org/x/text/internal/match.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
// This file contains matchers that implement CLDR inheritance.
//
// See http://unicode.org/reports/tr35/#Locale_Inheritance.
//
// Some of the inheritance described in this document is already handled by
// the cldr package.
import (
"golang.org/x/text/language"
)
// TODO: consider if (some of the) matching algorithm needs to be public after
// getting some feel about what is generic and what is specific.
// NewInheritanceMatcher returns a matcher that matches based on the inheritance
// chain.
//
// The matcher uses canonicalization and the parent relationship to find a
// match. The resulting match will always be either Und or a language with the
// same language and script as the requested language. It will not match
// languages for which there is understood to be mutual or one-directional
// intelligibility.
//
// A Match will indicate an Exact match if the language matches after
// canonicalization and High if the matched tag is a parent.
func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher {
tags := &InheritanceMatcher{make(map[language.Tag]int)}
for i, tag := range t {
ct, err := language.All.Canonicalize(tag)
if err != nil {
ct = tag
}
tags.index[ct] = i
}
return tags
}
type InheritanceMatcher struct {
index map[language.Tag]int
}
func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) {
for _, t := range want {
ct, err := language.All.Canonicalize(t)
if err != nil {
ct = t
}
conf := language.Exact
for {
if index, ok := m.index[ct]; ok {
return ct, index, conf
}
if ct == language.Und {
break
}
ct = ct.Parent()
conf = language.High
}
}
return language.Und, 0, language.No
}
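// Illustrative usage (added here, not part of the vendored file): given
// m := NewInheritanceMatcher([]language.Tag{language.English, language.Spanish}),
// m.Match(language.English) returns (en, 0, language.Exact) because the tag is
// found directly, while a request whose canonicalized parent chain reaches "en"
// (for example an English variant) would match with language.High instead.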

116
vendor/golang.org/x/text/internal/tables.go generated vendored Normal file
View File

@ -0,0 +1,116 @@
// This file was generated by go generate; DO NOT EDIT
package internal
// Parent maps a compact index of a tag to the compact index of the parent of
// this tag.
var Parent = []uint16{ // 752 elements
// Entry 0 - 3F
0x0000, 0x0053, 0x00e5, 0x0000, 0x0003, 0x0003, 0x0000, 0x0006,
0x0000, 0x0008, 0x0000, 0x000a, 0x0000, 0x000c, 0x000c, 0x000c,
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c, 0x000c,
0x000c, 0x0000, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x002e,
0x0000, 0x0000, 0x0031, 0x0030, 0x0030, 0x0000, 0x0035, 0x0000,
0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x003d, 0x0000,
// Entry 40 - 7F
0x0000, 0x0040, 0x0000, 0x0042, 0x0042, 0x0000, 0x0045, 0x0045,
0x0000, 0x0048, 0x0000, 0x004a, 0x0000, 0x0000, 0x004d, 0x004c,
0x004c, 0x0000, 0x0051, 0x0051, 0x0051, 0x0051, 0x0000, 0x0056,
0x0000, 0x0058, 0x0000, 0x005a, 0x0000, 0x005c, 0x005c, 0x0000,
0x005f, 0x0000, 0x0061, 0x0000, 0x0063, 0x0000, 0x0065, 0x0065,
0x0000, 0x0068, 0x0000, 0x006a, 0x006a, 0x006a, 0x006a, 0x006a,
0x006a, 0x006a, 0x0000, 0x0072, 0x0000, 0x0074, 0x0000, 0x0076,
0x0000, 0x0000, 0x0079, 0x0000, 0x007b, 0x0000, 0x007d, 0x0000,
// Entry 80 - BF
0x007f, 0x007f, 0x0000, 0x0082, 0x0082, 0x0000, 0x0085, 0x0086,
0x0086, 0x0086, 0x0085, 0x0087, 0x0086, 0x0086, 0x0086, 0x0085,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086,
0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0087, 0x0086, 0x0086,
0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0086, 0x0085, 0x0086,
// Entry C0 - FF
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0085,
0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0087, 0x0086, 0x0086,
0x0087, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086,
0x0086, 0x0086, 0x0086, 0x0086, 0x0085, 0x0085, 0x0086, 0x0086,
0x0085, 0x0086, 0x0086, 0x0086, 0x0086, 0x0086, 0x0000, 0x00ee,
0x0000, 0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f1,
0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f0, 0x00f0, 0x00f1, 0x00f1,
// Entry 100 - 13F
0x00f0, 0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x00f0, 0x00f1, 0x00f1,
0x00f1, 0x00f1, 0x00f1, 0x00f1, 0x0000, 0x010c, 0x0000, 0x010e,
0x0000, 0x0110, 0x0000, 0x0112, 0x0112, 0x0000, 0x0115, 0x0115,
0x0115, 0x0115, 0x0000, 0x011a, 0x0000, 0x011c, 0x0000, 0x011e,
0x011e, 0x0000, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
// Entry 140 - 17F
0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121, 0x0121,
0x0000, 0x0150, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
0x0000, 0x0158, 0x0000, 0x015a, 0x015a, 0x015a, 0x0000, 0x015e,
0x0000, 0x0000, 0x0161, 0x0000, 0x0163, 0x0000, 0x0165, 0x0165,
0x0165, 0x0000, 0x0169, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000,
0x016f, 0x016f, 0x0000, 0x0172, 0x0000, 0x0174, 0x0000, 0x0176,
0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e,
// Entry 180 - 1BF
0x0000, 0x0180, 0x0180, 0x0180, 0x0000, 0x0000, 0x0185, 0x0000,
0x0000, 0x0188, 0x0000, 0x018a, 0x0000, 0x0000, 0x018d, 0x0000,
0x018f, 0x0000, 0x0000, 0x0192, 0x0000, 0x0000, 0x0195, 0x0000,
0x0197, 0x0000, 0x0199, 0x0000, 0x019b, 0x0000, 0x019d, 0x0000,
0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
0x01a7, 0x0000, 0x01a9, 0x01a9, 0x0000, 0x01ac, 0x0000, 0x01ae,
0x0000, 0x01b0, 0x0000, 0x01b2, 0x0000, 0x01b4, 0x0000, 0x0000,
0x01b7, 0x0000, 0x01b9, 0x0000, 0x01bb, 0x0000, 0x01bd, 0x0000,
// Entry 1C0 - 1FF
0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x01c3, 0x01c3, 0x01c3,
0x0000, 0x01c8, 0x0000, 0x01ca, 0x01ca, 0x0000, 0x01cd, 0x0000,
0x01cf, 0x0000, 0x01d1, 0x0000, 0x01d3, 0x0000, 0x01d5, 0x0000,
0x01d7, 0x01d7, 0x0000, 0x01da, 0x0000, 0x01dc, 0x0000, 0x01de,
0x0000, 0x01e0, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x01ec, 0x01ec,
0x0000, 0x01f0, 0x0000, 0x01f2, 0x0000, 0x01f4, 0x0000, 0x01f6,
0x0000, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x01fb, 0x0000, 0x01fe,
// Entry 200 - 23F
0x0000, 0x0200, 0x0200, 0x0000, 0x0203, 0x0203, 0x0000, 0x0206,
0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0206, 0x0000, 0x020e,
0x0000, 0x0210, 0x0000, 0x0212, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0218, 0x0000, 0x0000, 0x021b, 0x0000, 0x021d, 0x021d,
0x0000, 0x0220, 0x0000, 0x0222, 0x0222, 0x0000, 0x0000, 0x0226,
0x0225, 0x0225, 0x0000, 0x0000, 0x022b, 0x0000, 0x022d, 0x0000,
0x022f, 0x0000, 0x023b, 0x0231, 0x023b, 0x023b, 0x023b, 0x023b,
0x023b, 0x023b, 0x023b, 0x0231, 0x023b, 0x023b, 0x0000, 0x023e,
// Entry 240 - 27F
0x023e, 0x023e, 0x0000, 0x0242, 0x0000, 0x0244, 0x0000, 0x0246,
0x0246, 0x0000, 0x0249, 0x0000, 0x024b, 0x024b, 0x024b, 0x024b,
0x024b, 0x024b, 0x0000, 0x0252, 0x0000, 0x0254, 0x0000, 0x0256,
0x0000, 0x0258, 0x0000, 0x025a, 0x0000, 0x0000, 0x025d, 0x025d,
0x025d, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
0x0000, 0x0268, 0x0267, 0x0267, 0x0000, 0x026c, 0x0000, 0x026e,
0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0000, 0x0275, 0x0000,
0x0000, 0x0278, 0x0000, 0x027a, 0x027a, 0x027a, 0x027a, 0x0000,
// Entry 280 - 2BF
0x027f, 0x027f, 0x027f, 0x0000, 0x0283, 0x0283, 0x0283, 0x0283,
0x0283, 0x0000, 0x0289, 0x0289, 0x0289, 0x0289, 0x0000, 0x0000,
0x0000, 0x0000, 0x0291, 0x0291, 0x0291, 0x0000, 0x0295, 0x0295,
0x0295, 0x0295, 0x0000, 0x0000, 0x029b, 0x029b, 0x029b, 0x029b,
0x0000, 0x02a0, 0x0000, 0x02a2, 0x02a2, 0x0000, 0x02a5, 0x0000,
0x02a7, 0x02a7, 0x0000, 0x0000, 0x02ab, 0x0000, 0x0000, 0x02ae,
0x0000, 0x02b0, 0x02b0, 0x0000, 0x0000, 0x02b4, 0x0000, 0x02b6,
0x0000, 0x02b8, 0x0000, 0x02ba, 0x0000, 0x02bc, 0x02bc, 0x0000,
// Entry 2C0 - 2FF
0x0000, 0x02c0, 0x0000, 0x02c2, 0x02bf, 0x02bf, 0x0000, 0x0000,
0x02c7, 0x02c6, 0x02c6, 0x0000, 0x0000, 0x02cc, 0x0000, 0x02ce,
0x0000, 0x02d0, 0x0000, 0x0000, 0x02d3, 0x0000, 0x0000, 0x0000,
0x02d7, 0x0000, 0x02d9, 0x0000, 0x02db, 0x0000, 0x02dd, 0x02dd,
0x0000, 0x02e0, 0x0000, 0x02e2, 0x0000, 0x02e4, 0x02e4, 0x02e4,
0x02e4, 0x02e4, 0x0000, 0x02ea, 0x02eb, 0x02ea, 0x0000, 0x02ee,
} // Size: 1528 bytes
// Total table size 1528 bytes (1KiB); checksum: B99CF952

File diff suppressed because it is too large Load Diff

View File

@ -678,6 +678,8 @@ func (b *builder) parseIndices() {
b.locale.parse(meta.DefaultContent.Locales)
}
// TODO: region inclusion data will probably not be used in future matchers.
func (b *builder) computeRegionGroups() {
b.groups = make(map[int]index)
@ -686,6 +688,11 @@ func (b *builder) computeRegionGroups() {
b.groups[i] = index(len(b.groups))
}
for _, g := range b.supp.TerritoryContainment.Group {
// Skip UN and EURO zone as they are flattening the containment
// relationship.
if g.Type == "EZ" || g.Type == "UN" {
continue
}
group := b.region.index(g.Type)
if _, ok := b.groups[group]; !ok {
b.groups[group] = index(len(b.groups))
@ -1483,6 +1490,11 @@ func (b *builder) writeRegionInclusionData() {
containment = make(map[index][]index)
)
for _, g := range b.supp.TerritoryContainment.Group {
// Skip UN and EURO zone as they are flattening the containment
// relationship.
if g.Type == "EZ" || g.Type == "UN" {
continue
}
group := b.region.index(g.Type)
groupIdx := b.groups[group]
for _, mem := range strings.Split(g.Contains, " ") {

File diff suppressed because it is too large Load Diff

View File

@ -5,6 +5,8 @@
package internal
import (
"os"
"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)
@ -84,3 +86,31 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
return withNamespace(ctx, namespace)
}
// SetTestEnv sets the env variables for testing background ticket in Flex.
func SetTestEnv() func() {
var environ = []struct {
key, value string
}{
{"GAE_LONG_APP_ID", "my-app-id"},
{"GAE_MINOR_VERSION", "067924799508853122"},
{"GAE_MODULE_INSTANCE", "0"},
{"GAE_MODULE_NAME", "default"},
{"GAE_MODULE_VERSION", "20150612t184001"},
}
for _, v := range environ {
old := os.Getenv(v.key)
os.Setenv(v.key, v.value)
v.value = old
}
return func() { // Restore old environment after the test completes.
for _, v := range environ {
if v.value == "" {
os.Unsetenv(v.key)
continue
}
os.Setenv(v.key, v.value)
}
}
}
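// Illustrative usage (added here, not part of the vendored file): the returned
// closure restores the previous environment, so a test would typically run
//
//	restore := SetTestEnv()
//	defer restore()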

View File

@ -123,6 +123,7 @@ func IsStandardContainerResourceName(str string) bool {
var standardLimitRangeTypes = sets.NewString(
string(LimitTypePod),
string(LimitTypeContainer),
string(LimitTypePersistentVolumeClaim),
)
// IsStandardLimitRangeType returns true if the type is Pod or Container

View File

@ -2684,6 +2684,8 @@ const (
LimitTypePod LimitType = "Pod"
// Limit that applies to all containers in a namespace
LimitTypeContainer LimitType = "Container"
// Limit that applies to all persistent volume claims in a namespace
LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind

View File

@ -3138,6 +3138,8 @@ const (
LimitTypePod LimitType = "Pod"
// Limit that applies to all containers in a namespace
LimitTypeContainer LimitType = "Container"
// Limit that applies to all persistent volume claims in a namespace
LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.

File diff suppressed because it is too large Load Diff

View File

@ -385,6 +385,10 @@ type DaemonSetStatus struct {
// DesiredNumberScheduled is the total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
DesiredNumberScheduled int32 `json:"desiredNumberScheduled"`
// NumberReady is the number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
NumberReady int32 `json:"numberReady"`
}
// +genclient=true

View File

@ -797,6 +797,9 @@ func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) {
data[i] = 0x18
i++
i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled))
data[i] = 0x20
i++
i = encodeVarintGenerated(data, i, uint64(m.NumberReady))
return i, nil
}
@ -3305,6 +3308,7 @@ func (m *DaemonSetStatus) Size() (n int) {
n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled))
n += 1 + sovGenerated(uint64(m.NumberMisscheduled))
n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled))
n += 1 + sovGenerated(uint64(m.NumberReady))
return n
}
@ -4272,6 +4276,7 @@ func (this *DaemonSetStatus) String() string {
`CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`,
`NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`,
`DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`,
`NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`,
`}`,
}, "")
return s
@ -5995,6 +6000,25 @@ func (m *DaemonSetStatus) Unmarshal(data []byte) error {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType)
}
m.NumberReady = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.NumberReady |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
@ -14064,249 +14088,250 @@ var (
)
var fileDescriptorGenerated = []byte{
// 3893 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x5b, 0xdb, 0x8f, 0x1c, 0x47,
0xd5, 0x4f, 0xcf, 0xec, 0x65, 0xa6, 0xf6, 0xe2, 0x75, 0x79, 0x6d, 0x4f, 0x36, 0x89, 0x9d, 0x74,
0xf4, 0xe5, 0xa2, 0x2f, 0x9e, 0xfd, 0xec, 0x2f, 0x09, 0x8e, 0x93, 0x38, 0xd9, 0xd9, 0x8b, 0xed,
0x64, 0xd7, 0x9e, 0xd4, 0xac, 0x9d, 0x90, 0x2b, 0xbd, 0x33, 0xb5, 0xb3, 0xed, 0xed, 0x99, 0x9e,
0xf4, 0x65, 0xb3, 0x13, 0x84, 0x08, 0x02, 0x24, 0x5e, 0x12, 0xf2, 0x46, 0x24, 0xe0, 0x01, 0x09,
0xc4, 0x13, 0x11, 0x48, 0x48, 0x79, 0xe0, 0x05, 0x22, 0x21, 0xcc, 0x03, 0x22, 0x20, 0x10, 0x3c,
0x40, 0x12, 0x82, 0x20, 0xe2, 0x5f, 0x08, 0x3c, 0x70, 0xaa, 0xba, 0xfa, 0x52, 0x3d, 0xdd, 0x63,
0xf7, 0xec, 0x45, 0x20, 0x78, 0x58, 0xd9, 0x5d, 0x75, 0xce, 0xef, 0x9c, 0x3a, 0x75, 0xea, 0xd4,
0xa9, 0xaa, 0x33, 0xe8, 0xd1, 0xcd, 0xd3, 0x76, 0x59, 0x37, 0x67, 0x37, 0xdd, 0x35, 0x6a, 0xb5,
0xa9, 0x43, 0xed, 0xd9, 0xce, 0x66, 0x73, 0x56, 0xeb, 0xe8, 0xf6, 0x2c, 0xdd, 0x76, 0x68, 0xdb,
0xd6, 0xcd, 0xb6, 0x3d, 0xbb, 0x75, 0x72, 0x8d, 0x3a, 0xda, 0xc9, 0xd9, 0x26, 0x6d, 0x53, 0x4b,
0x73, 0x68, 0xa3, 0xdc, 0xb1, 0x4c, 0xc7, 0xc4, 0x27, 0x3c, 0xf6, 0x72, 0xc8, 0x5e, 0x06, 0xf6,
0x32, 0x63, 0x2f, 0x87, 0xec, 0x65, 0xc1, 0x3e, 0x73, 0xa2, 0xa9, 0x3b, 0x1b, 0xee, 0x5a, 0xb9,
0x6e, 0xb6, 0x66, 0x9b, 0x66, 0xd3, 0x9c, 0xe5, 0x28, 0x6b, 0xee, 0x3a, 0xff, 0xe2, 0x1f, 0xfc,
0x7f, 0x1e, 0xfa, 0xcc, 0xa9, 0x54, 0xe5, 0x66, 0x2d, 0x6a, 0x9b, 0xae, 0x55, 0xa7, 0x71, 0x8d,
0x66, 0x1e, 0x48, 0xe7, 0x71, 0xdb, 0x5b, 0xd4, 0x62, 0x0a, 0xd1, 0x46, 0x0f, 0xdb, 0x7d, 0xe9,
0x6c, 0x5b, 0x3d, 0xc3, 0x9e, 0x39, 0x91, 0x4c, 0x6d, 0xb9, 0x6d, 0x47, 0x6f, 0xf5, 0xea, 0x74,
0x32, 0x99, 0xdc, 0x75, 0x74, 0x63, 0x56, 0x6f, 0x3b, 0xb6, 0x63, 0xc5, 0x59, 0xd4, 0x32, 0x42,
0x73, 0xd5, 0x0b, 0x57, 0x3c, 0x7d, 0xf1, 0xed, 0x68, 0xa8, 0xad, 0xb5, 0x68, 0x49, 0xb9, 0x5d,
0xb9, 0xa7, 0x58, 0x19, 0xbf, 0xf6, 0xfe, 0xf1, 0x9b, 0x3e, 0x7a, 0xff, 0xf8, 0xd0, 0x45, 0x68,
0x23, 0xbc, 0x47, 0x7d, 0x1e, 0x4d, 0xcf, 0x57, 0x2f, 0xaf, 0x6a, 0x56, 0x93, 0x3a, 0x97, 0x01,
0x57, 0x7f, 0x55, 0x73, 0x18, 0xe7, 0x02, 0x9a, 0x72, 0x78, 0x63, 0x95, 0x82, 0xb5, 0xda, 0x8e,
0xd6, 0xf4, 0x50, 0x86, 0x2b, 0x25, 0x81, 0x32, 0xb5, 0x1a, 0xeb, 0x27, 0x3d, 0x1c, 0xea, 0xd7,
0x14, 0x74, 0xf3, 0xbc, 0x6b, 0x3b, 0x66, 0x6b, 0x85, 0x3a, 0x96, 0x5e, 0x9f, 0x77, 0x2d, 0x0b,
0xba, 0x6a, 0x8e, 0xe6, 0xb8, 0xf6, 0xf5, 0xb5, 0xc3, 0xcf, 0xa0, 0xe1, 0x2d, 0xcd, 0x70, 0x69,
0x29, 0x07, 0x24, 0x63, 0xa7, 0xee, 0x2b, 0xa7, 0xba, 0x4d, 0xd9, 0x9f, 0xd8, 0xf2, 0x53, 0xae,
0x06, 0xd6, 0x74, 0xba, 0x95, 0x69, 0x01, 0x38, 0x2e, 0xa4, 0x5e, 0x61, 0x48, 0xc4, 0x03, 0x54,
0xdf, 0x50, 0xd0, 0x6d, 0xa9, 0x9a, 0x2d, 0xeb, 0xb6, 0x83, 0x5b, 0x68, 0x58, 0x77, 0x68, 0xcb,
0x06, 0xf5, 0xf2, 0x20, 0xfb, 0x7c, 0x39, 0x93, 0xcb, 0x96, 0x53, 0xc1, 0x2b, 0x13, 0x42, 0xaf,
0xe1, 0x0b, 0x0c, 0x9e, 0x78, 0x52, 0xd4, 0xaf, 0x2a, 0x08, 0x47, 0x79, 0x3c, 0xeb, 0xde, 0x80,
0x8d, 0x9e, 0xde, 0x89, 0x8d, 0x0e, 0x09, 0xc0, 0x31, 0x4f, 0x9c, 0x64, 0xa2, 0xd7, 0x14, 0x74,
0xa4, 0x57, 0x23, 0x6e, 0x9b, 0x75, 0xd9, 0x36, 0x73, 0x3b, 0xb0, 0x8d, 0x87, 0x9a, 0x62, 0x94,
0xef, 0xe5, 0x50, 0x71, 0x41, 0xa3, 0x2d, 0xb3, 0x5d, 0x03, 0x5b, 0x3c, 0x83, 0x0a, 0x2d, 0xe0,
0x6f, 0x68, 0x8e, 0xc6, 0xed, 0x31, 0x76, 0xea, 0x9e, 0x3e, 0x83, 0xdd, 0x3a, 0x59, 0xbe, 0xb4,
0x76, 0x95, 0xd6, 0x1d, 0x90, 0xa3, 0x55, 0xb0, 0xc0, 0x47, 0x61, 0x1b, 0x09, 0xd0, 0xf0, 0x8b,
0x68, 0xc8, 0xee, 0xd0, 0xba, 0x30, 0xe1, 0x23, 0x19, 0x87, 0x13, 0x68, 0x58, 0x03, 0x8c, 0x70,
0x8e, 0xd8, 0x17, 0xe1, 0xb8, 0x60, 0xaf, 0x11, 0x9b, 0x4f, 0x7e, 0x29, 0xcf, 0x25, 0x9c, 0x1d,
0x58, 0x82, 0xe7, 0x42, 0x93, 0x42, 0xc6, 0x88, 0xf7, 0x4d, 0x04, 0xba, 0xfa, 0x0b, 0x05, 0x4d,
0x04, 0xb4, 0x7c, 0xa6, 0x5e, 0xe8, 0xb1, 0xd9, 0x6c, 0x1f, 0x9b, 0x45, 0x22, 0x5d, 0x99, 0xb1,
0x73, 0xd3, 0x4d, 0x09, 0x61, 0x05, 0xbf, 0x25, 0x62, 0xb8, 0x17, 0x7c, 0x47, 0xc8, 0x71, 0x47,
0x38, 0x3d, 0xe8, 0xb8, 0x52, 0xe6, 0xff, 0x37, 0xd1, 0xf1, 0xd4, 0x3c, 0x4b, 0x16, 0x6c, 0x6a,
0xc0, 0x0c, 0x9a, 0x96, 0x18, 0x4f, 0xd6, 0xd9, 0x5a, 0xd6, 0xd6, 0xa8, 0x51, 0x13, 0x18, 0x95,
0x71, 0x36, 0x30, 0xff, 0x8b, 0x04, 0xd8, 0xf8, 0x39, 0x54, 0x00, 0x0d, 0x3a, 0x06, 0x84, 0x56,
0xe1, 0x15, 0x27, 0xfa, 0xfb, 0x5a, 0xd5, 0x6c, 0xac, 0x0a, 0x06, 0xee, 0x06, 0x81, 0xd5, 0xfc,
0x56, 0x12, 0x00, 0xaa, 0xaf, 0xe7, 0xd0, 0x81, 0xd8, 0x94, 0xe2, 0x2b, 0xe8, 0x48, 0xdd, 0x0b,
0x13, 0x17, 0xdd, 0x16, 0x08, 0xa8, 0xd5, 0x37, 0x68, 0xc3, 0x35, 0x68, 0x43, 0x84, 0xdd, 0x63,
0x02, 0xef, 0xc8, 0x7c, 0x22, 0x15, 0x49, 0xe1, 0xc6, 0x4f, 0x20, 0xdc, 0xe6, 0x4d, 0x2b, 0xba,
0x6d, 0x07, 0x98, 0x39, 0x8e, 0x39, 0x23, 0x30, 0xf1, 0xc5, 0x1e, 0x0a, 0x92, 0xc0, 0xc5, 0x74,
0x6c, 0x50, 0x5b, 0xb7, 0x68, 0x23, 0xae, 0x63, 0x5e, 0xd6, 0x71, 0x21, 0x91, 0x8a, 0xa4, 0x70,
0xab, 0xdf, 0xcf, 0x21, 0xb4, 0x40, 0x3b, 0x86, 0xd9, 0x6d, 0xc1, 0x08, 0xf6, 0x70, 0x9d, 0xbf,
0x24, 0xad, 0xf3, 0x47, 0xb3, 0x7a, 0x6b, 0xa0, 0x62, 0xea, 0x42, 0x6f, 0xc6, 0x16, 0xfa, 0x63,
0x83, 0x8b, 0xe8, 0xbf, 0xd2, 0x7f, 0xa9, 0xa0, 0xc9, 0x90, 0x78, 0x3f, 0x96, 0xfa, 0x8b, 0xf2,
0x52, 0x7f, 0x68, 0xe0, 0x91, 0xa5, 0xac, 0xf5, 0xb7, 0xf2, 0x08, 0x87, 0x44, 0xc4, 0x34, 0x8c,
0x35, 0xad, 0xbe, 0x79, 0x03, 0x1b, 0xe0, 0x77, 0x60, 0xe7, 0x74, 0x3b, 0x0d, 0x96, 0x04, 0xcd,
0xb5, 0xdb, 0xa6, 0xc3, 0x13, 0x18, 0x5f, 0xcd, 0x4f, 0x0f, 0xac, 0xa6, 0xaf, 0x41, 0xf9, 0x72,
0x0f, 0xf6, 0x62, 0xdb, 0xb1, 0xba, 0xe1, 0xea, 0xe9, 0x25, 0x20, 0x09, 0x0a, 0xe1, 0x97, 0x11,
0xb2, 0x04, 0xe6, 0xaa, 0x29, 0xfc, 0x23, 0xab, 0x0b, 0xfa, 0x4a, 0xcd, 0x9b, 0xed, 0x75, 0xbd,
0x19, 0x7a, 0x3b, 0x09, 0x80, 0x49, 0x44, 0xc8, 0xcc, 0x22, 0x3a, 0x9a, 0xa2, 0x3d, 0x9e, 0x42,
0xf9, 0x4d, 0xda, 0xf5, 0xcc, 0x4a, 0xd8, 0x7f, 0xf1, 0x74, 0x34, 0x91, 0x28, 0x8a, 0x2c, 0xe0,
0x4c, 0xee, 0xb4, 0xa2, 0xbe, 0x35, 0x1c, 0x75, 0x36, 0x1e, 0x87, 0xef, 0x41, 0x05, 0x0b, 0x5a,
0xf4, 0xba, 0x66, 0x8b, 0x00, 0xc5, 0x23, 0x29, 0x11, 0x6d, 0x24, 0xe8, 0x95, 0x22, 0x76, 0x6e,
0x9f, 0x22, 0x76, 0x7e, 0x97, 0x23, 0x36, 0x36, 0x61, 0x10, 0x0e, 0xcb, 0xb3, 0x9b, 0xdd, 0xd2,
0x10, 0x07, 0x9f, 0xdb, 0xc1, 0xca, 0xf6, 0x80, 0x42, 0x81, 0x7e, 0x0b, 0x09, 0x84, 0xe0, 0x39,
0x74, 0xa0, 0xa5, 0xb7, 0x09, 0xd5, 0x1a, 0xdd, 0x1a, 0xad, 0x9b, 0xed, 0x86, 0x5d, 0x1a, 0xe6,
0x66, 0x3e, 0x2a, 0x98, 0x0e, 0xac, 0xc8, 0xdd, 0x24, 0x4e, 0x8f, 0x97, 0xd1, 0xb4, 0x45, 0xb7,
0x74, 0xa6, 0xc6, 0x79, 0x58, 0xce, 0xa6, 0xd5, 0x5d, 0xd6, 0x5b, 0xba, 0x53, 0x1a, 0xf1, 0xd2,
0x78, 0xc0, 0x98, 0x26, 0x09, 0xfd, 0x24, 0x91, 0x0b, 0xdf, 0x85, 0x46, 0x3a, 0x9a, 0x6b, 0x43,
0xac, 0x1f, 0x05, 0xfe, 0x42, 0x18, 0x98, 0xaa, 0xbc, 0x95, 0x88, 0x5e, 0x48, 0x9b, 0xa3, 0x5e,
0x5e, 0xd8, 0x0d, 0x2f, 0x9f, 0x4c, 0xf7, 0x70, 0xf5, 0xe3, 0x1c, 0x9a, 0x8a, 0x07, 0x4d, 0xb6,
0xe7, 0x99, 0x6b, 0x36, 0xb5, 0xb6, 0x68, 0xe3, 0x9c, 0x77, 0x3e, 0x02, 0x78, 0xee, 0xa6, 0xf9,
0x70, 0xd5, 0x5e, 0xea, 0xa1, 0x20, 0x09, 0x5c, 0xf8, 0xbe, 0x88, 0xa3, 0x7b, 0xbb, 0x66, 0x30,
0x6d, 0x09, 0xce, 0x0e, 0xd3, 0x26, 0x56, 0xbe, 0xdf, 0x29, 0xb6, 0xc6, 0x60, 0xda, 0x2e, 0xcb,
0xdd, 0x24, 0x4e, 0x8f, 0xcf, 0xa1, 0x83, 0xda, 0x96, 0xa6, 0x1b, 0xda, 0x9a, 0x41, 0x03, 0x90,
0x21, 0x0e, 0x72, 0xb3, 0x00, 0x39, 0x38, 0x17, 0x27, 0x20, 0xbd, 0x3c, 0x78, 0x05, 0x1d, 0x72,
0xdb, 0xbd, 0x50, 0x9e, 0x1b, 0xdd, 0x22, 0xa0, 0x0e, 0x5d, 0xee, 0x25, 0x21, 0x49, 0x7c, 0xea,
0xaf, 0x94, 0x68, 0x7c, 0xf6, 0x5d, 0x16, 0x9f, 0x41, 0x43, 0x4e, 0xb7, 0xe3, 0xc7, 0xe7, 0xbb,
0xfc, 0xf8, 0xbc, 0x0a, 0x6d, 0x9f, 0xf0, 0x4c, 0x20, 0xce, 0xc1, 0x7a, 0x08, 0xe7, 0xc1, 0x9f,
0x47, 0x13, 0x6c, 0x2a, 0xf5, 0x76, 0xd3, 0xb3, 0x8a, 0x88, 0x0f, 0x4b, 0x03, 0xb8, 0x4b, 0x80,
0x11, 0xd9, 0x67, 0x0e, 0x82, 0x22, 0x13, 0x52, 0x27, 0x91, 0xe5, 0xc1, 0xe9, 0x77, 0x62, 0x71,
0xbb, 0x63, 0x5a, 0xce, 0xa5, 0x8e, 0x17, 0xa3, 0xc1, 0xcb, 0x29, 0x6f, 0xe0, 0xe3, 0x89, 0x78,
0xb9, 0x47, 0x46, 0x44, 0x2f, 0xbe, 0x13, 0x0d, 0xd3, 0x6d, 0xad, 0xee, 0x70, 0x8d, 0x0b, 0xe1,
0x8e, 0xb6, 0xc8, 0x1a, 0x89, 0xd7, 0xa7, 0xfe, 0x00, 0x0e, 0x50, 0x4b, 0xb5, 0x73, 0x96, 0xe9,
0x76, 0xfc, 0xc1, 0xfb, 0x72, 0x3e, 0x85, 0x86, 0x2c, 0x48, 0x7d, 0x84, 0xd5, 0xee, 0xf4, 0xad,
0x46, 0xa0, 0x0d, 0xac, 0x76, 0x28, 0xc6, 0xe5, 0x99, 0x8c, 0x31, 0xc0, 0x2e, 0x3c, 0x62, 0x69,
0xed, 0x26, 0xf5, 0xf7, 0xb7, 0x07, 0x33, 0xda, 0xea, 0xc2, 0x02, 0x61, 0xec, 0xe1, 0xc0, 0xf8,
0x27, 0xe4, 0x15, 0x1e, 0xaa, 0xfa, 0x4d, 0x05, 0x1d, 0x38, 0xbf, 0xba, 0x5a, 0xbd, 0xd0, 0x6e,
0xc2, 0x69, 0xd1, 0xae, 0x6a, 0xce, 0x06, 0xdb, 0x82, 0x3b, 0xf0, 0x6f, 0x7c, 0x0b, 0x66, 0x7d,
0x84, 0xf7, 0xe0, 0x0d, 0x34, 0xca, 0xd6, 0x23, 0x6d, 0x37, 0x06, 0x4c, 0xad, 0x84, 0xb8, 0x8a,
0x07, 0x52, 0x39, 0x20, 0x64, 0x8c, 0x8a, 0x06, 0xe2, 0xc3, 0xab, 0x9f, 0x45, 0xd3, 0x11, 0xf5,
0x98, 0xbd, 0xf8, 0x99, 0x15, 0xd7, 0xd1, 0x30, 0xd3, 0xc4, 0x3f, 0x91, 0x66, 0x3d, 0x60, 0xc5,
0x86, 0x1c, 0x4e, 0x28, 0xfb, 0x82, 0x14, 0x85, 0x63, 0xab, 0xbf, 0xcb, 0xa1, 0xa3, 0xe7, 0x4d,
0x4b, 0x7f, 0xd5, 0x6c, 0x3b, 0x9a, 0x01, 0xfb, 0xc7, 0x9c, 0xeb, 0x98, 0x76, 0x5d, 0x33, 0xa8,
0xb5, 0x87, 0x49, 0xab, 0x21, 0x25, 0xad, 0x4f, 0x64, 0x1d, 0x59, 0xb2, 0xbe, 0xa9, 0x19, 0xac,
0x13, 0xcb, 0x60, 0x97, 0x77, 0x49, 0x5e, 0xff, 0x74, 0xf6, 0x6f, 0x0a, 0xba, 0x25, 0x85, 0x73,
0x3f, 0x72, 0xdb, 0x4d, 0x39, 0xb7, 0x5d, 0xda, 0x9d, 0x31, 0xa7, 0x24, 0xba, 0x7f, 0xcf, 0xa5,
0x8e, 0x95, 0xa7, 0x56, 0x2f, 0x43, 0xae, 0xc1, 0xbe, 0x08, 0x5d, 0x17, 0x63, 0x9d, 0xcf, 0xa8,
0x4f, 0xcd, 0x5d, 0xf3, 0xaf, 0x7a, 0x00, 0x84, 0xc2, 0xb1, 0xb0, 0x4e, 0x23, 0xd9, 0x86, 0x00,
0x27, 0x81, 0x18, 0x7c, 0x12, 0x8d, 0xf1, 0xec, 0x41, 0xda, 0xe7, 0x0e, 0xb0, 0x7b, 0xa1, 0x95,
0xb0, 0x99, 0x44, 0x69, 0xf0, 0x03, 0xc0, 0xa2, 0x6d, 0xc7, 0x76, 0xb9, 0xe0, 0x3a, 0x69, 0x25,
0xec, 0x22, 0x51, 0x3a, 0x08, 0xf9, 0x93, 0xf5, 0x8e, 0x1b, 0xb9, 0x69, 0x14, 0xe9, 0x54, 0xd6,
0x21, 0x26, 0x5d, 0x5a, 0x56, 0x30, 0x88, 0x9e, 0x84, 0x9e, 0x48, 0x1b, 0x89, 0x89, 0x53, 0xdf,
0xcd, 0xa3, 0xdb, 0xfa, 0xfa, 0x28, 0x5e, 0xea, 0x93, 0x3d, 0x1c, 0xc9, 0x90, 0x39, 0x34, 0xd0,
0x84, 0xa1, 0xd9, 0x0e, 0x37, 0xf7, 0xaa, 0xde, 0xf2, 0x77, 0xb7, 0xff, 0xbd, 0x41, 0xc7, 0x65,
0x2c, 0xde, 0x16, 0xb6, 0x1c, 0x45, 0x21, 0x32, 0x28, 0xcb, 0x38, 0xc4, 0xc9, 0x3f, 0x2d, 0xe3,
0x98, 0x97, 0xbb, 0x49, 0x9c, 0x9e, 0x41, 0x88, 0x83, 0x79, 0x2c, 0xdf, 0x08, 0x20, 0x16, 0xe4,
0x6e, 0x12, 0xa7, 0x87, 0xac, 0xef, 0xb8, 0x40, 0x95, 0xcd, 0x1f, 0xb9, 0x3d, 0xf6, 0xf2, 0x8e,
0x3b, 0x01, 0xee, 0xf8, 0x7c, 0x7f, 0x52, 0x72, 0x3d, 0x2c, 0x75, 0x05, 0x4d, 0x9c, 0x37, 0x6d,
0xa7, 0xca, 0xb6, 0x64, 0xb6, 0x6f, 0xe1, 0xdb, 0x50, 0x1e, 0x9c, 0x53, 0x9c, 0x44, 0xc6, 0x84,
0xda, 0x79, 0xe6, 0xbc, 0xac, 0x9d, 0x77, 0x6b, 0xdb, 0xc2, 0xaf, 0xc3, 0x6e, 0xf0, 0x4b, 0xd6,
0xae, 0x9e, 0x43, 0xa3, 0x62, 0x5f, 0x8c, 0x02, 0xe5, 0xfb, 0x03, 0xe5, 0x13, 0x80, 0xbe, 0x9b,
0x03, 0x24, 0x6f, 0x1b, 0xd9, 0xc3, 0x0d, 0xe1, 0x79, 0x69, 0x43, 0x38, 0x33, 0xd8, 0x56, 0x9b,
0xba, 0x01, 0x34, 0x62, 0x1b, 0xc0, 0x23, 0x03, 0xe2, 0xf7, 0x0f, 0xf8, 0x6f, 0x2b, 0x68, 0x52,
0xde, 0xf4, 0x59, 0x44, 0x61, 0x6b, 0x48, 0xaf, 0xd3, 0x8b, 0xe1, 0x81, 0x3f, 0x88, 0x28, 0xb5,
0xb0, 0x8b, 0x44, 0xe9, 0x30, 0x0d, 0xd8, 0x98, 0x3b, 0x08, 0xa3, 0x94, 0x53, 0x94, 0x66, 0x4f,
0x27, 0x65, 0xef, 0xe9, 0x04, 0x14, 0x75, 0x2e, 0xc1, 0x9a, 0xb7, 0x20, 0x1d, 0xec, 0x11, 0xc3,
0x3d, 0x2b, 0x8a, 0xab, 0xfe, 0x5c, 0x41, 0x63, 0x42, 0xe1, 0xfd, 0xd8, 0x91, 0x9e, 0x93, 0x77,
0xa4, 0x07, 0x07, 0xcc, 0xa7, 0x92, 0x77, 0xa0, 0x77, 0xc2, 0xb1, 0xb0, 0x0c, 0x8a, 0x25, 0x78,
0x1b, 0xb0, 0x9c, 0xe2, 0x09, 0x1e, 0x5b, 0x62, 0x84, 0xf7, 0xe0, 0x2f, 0x2b, 0x68, 0x4a, 0x8f,
0xe5, 0x5c, 0xc2, 0xd4, 0x8f, 0x0d, 0xa6, 0x5a, 0x00, 0x13, 0x3e, 0x28, 0xc5, 0x7b, 0x48, 0x8f,
0x48, 0xd5, 0x45, 0x3d, 0x54, 0x58, 0x03, 0xed, 0x1d, 0xa7, 0x33, 0xe0, 0x5e, 0x99, 0x94, 0x4d,
0x56, 0x0a, 0x7c, 0xf8, 0xd0, 0x43, 0x38, 0xb4, 0xfa, 0x76, 0x2e, 0x30, 0x58, 0xcd, 0x5b, 0x23,
0x41, 0xbe, 0xab, 0xec, 0x46, 0xbe, 0x3b, 0x96, 0x94, 0xeb, 0x42, 0x04, 0xc9, 0x3b, 0xc6, 0xa0,
0xf7, 0x6d, 0x42, 0xc2, 0xea, 0x72, 0x2d, 0x8c, 0x53, 0xf0, 0x41, 0x18, 0x24, 0x7e, 0x09, 0x0d,
0xb3, 0xd3, 0x04, 0x5b, 0xe2, 0xf9, 0xc1, 0x43, 0x08, 0xb3, 0x57, 0xe8, 0x61, 0xec, 0x0b, 0x3c,
0x8c, 0xe3, 0x42, 0x9a, 0x3e, 0x21, 0xc5, 0x01, 0x7c, 0x15, 0x8d, 0x1b, 0xa6, 0xd6, 0xa8, 0x68,
0x86, 0x06, 0xc9, 0x88, 0x7f, 0x77, 0xff, 0x7f, 0xfd, 0x23, 0xe2, 0x72, 0x84, 0x43, 0xc4, 0x93,
0xe0, 0x51, 0x2f, 0xda, 0x47, 0x24, 0x6c, 0x55, 0x43, 0x28, 0x1c, 0x3d, 0x3e, 0x8e, 0x86, 0x99,
0x0b, 0x7b, 0x27, 0x83, 0x62, 0xa5, 0xc8, 0x74, 0x65, 0x9e, 0x0d, 0xba, 0xf2, 0x76, 0x7c, 0x0a,
0x21, 0x9b, 0xd6, 0x2d, 0xea, 0xf0, 0xb0, 0xc3, 0x2f, 0xbf, 0xc2, 0x00, 0x5c, 0x0b, 0x7a, 0x48,
0x84, 0x4a, 0xfd, 0x7a, 0x0e, 0xe5, 0x9f, 0x30, 0xd7, 0xf6, 0x30, 0xc8, 0x3f, 0x23, 0x05, 0xf9,
0xac, 0xeb, 0x1f, 0x74, 0x4b, 0x0d, 0xf0, 0x9f, 0x89, 0x05, 0xf8, 0xd3, 0x03, 0x60, 0xf7, 0x0f,
0xee, 0xbf, 0xce, 0xa3, 0x71, 0xa0, 0x9a, 0x37, 0xdb, 0x0d, 0x9d, 0xa7, 0x42, 0xf7, 0x4b, 0x97,
0x04, 0xb7, 0xc7, 0x2e, 0x09, 0xa6, 0xa2, 0xb4, 0x91, 0xeb, 0x81, 0x2b, 0x81, 0xa2, 0xde, 0xa4,
0x9c, 0x95, 0xc5, 0x01, 0x67, 0xdf, 0xd7, 0xf7, 0x72, 0x80, 0x29, 0xab, 0x07, 0xa7, 0x55, 0x9e,
0x43, 0x55, 0x2d, 0x73, 0xcd, 0x4b, 0xcc, 0xf2, 0xd9, 0x13, 0xb3, 0xc3, 0x42, 0x17, 0x9e, 0x9c,
0x05, 0x48, 0x44, 0x06, 0xc6, 0xaf, 0x20, 0xcc, 0x1a, 0x56, 0xe1, 0x70, 0x6d, 0x7b, 0xa3, 0x63,
0xe2, 0x86, 0xb2, 0x8b, 0x0b, 0x6e, 0xad, 0x96, 0x7b, 0xe0, 0x48, 0x82, 0x08, 0x76, 0x8f, 0x61,
0x51, 0xcd, 0x86, 0xbc, 0x75, 0x98, 0x9b, 0x2e, 0x3c, 0xee, 0xf3, 0x56, 0x22, 0x7a, 0xf1, 0xbd,
0x68, 0xb4, 0x05, 0xeb, 0x84, 0xe5, 0x67, 0x23, 0x9c, 0x30, 0x38, 0x79, 0xaf, 0x78, 0xcd, 0xc4,
0xef, 0x57, 0x7f, 0xa2, 0xa0, 0x51, 0x98, 0xa8, 0xfd, 0xd8, 0xfc, 0x9e, 0x96, 0x37, 0xbf, 0x53,
0xd9, 0x1d, 0x34, 0x65, 0xe3, 0xfb, 0x51, 0x9e, 0x8f, 0x81, 0xc7, 0x70, 0x38, 0xf3, 0x74, 0x34,
0x4b, 0x33, 0x0c, 0x6a, 0xe8, 0x76, 0x4b, 0xa4, 0x8e, 0xfc, 0xcc, 0x53, 0x0d, 0x9b, 0x49, 0x94,
0x86, 0xb1, 0xd4, 0xcd, 0x56, 0xc7, 0xa0, 0xfe, 0x0b, 0x43, 0xc0, 0x32, 0x1f, 0x36, 0x93, 0x28,
0x0d, 0xbe, 0x84, 0x0e, 0x6b, 0x75, 0x47, 0xdf, 0xa2, 0x0b, 0x54, 0x6b, 0x18, 0x7a, 0x9b, 0xfa,
0xb7, 0xb9, 0x79, 0x9e, 0x42, 0xde, 0x0c, 0xcc, 0x87, 0xe7, 0x92, 0x08, 0x48, 0x32, 0x9f, 0x74,
0x9d, 0x3e, 0xb4, 0x87, 0xd7, 0xe9, 0xf7, 0xa3, 0x71, 0x0d, 0x4e, 0x46, 0x7e, 0x0f, 0xf7, 0xa3,
0x42, 0x65, 0x8a, 0x85, 0xde, 0xb9, 0x48, 0x3b, 0x91, 0xa8, 0xa4, 0x4b, 0xf8, 0x91, 0xdd, 0x7e,
0x36, 0xfd, 0x71, 0x1e, 0x15, 0x83, 0xe0, 0x83, 0x4d, 0x84, 0xea, 0xfe, 0x02, 0xf7, 0xaf, 0x7d,
0x1e, 0xce, 0xee, 0x29, 0x41, 0x90, 0x08, 0xe3, 0x71, 0xd0, 0x64, 0x93, 0x88, 0x08, 0x88, 0xc8,
0x45, 0x08, 0x20, 0x96, 0x33, 0xe8, 0x59, 0x6e, 0x02, 0xb0, 0x8b, 0x35, 0x1f, 0x81, 0x84, 0x60,
0xb8, 0x09, 0x87, 0xe2, 0xc0, 0x67, 0x06, 0x8d, 0x48, 0xde, 0xe1, 0x57, 0x82, 0x21, 0x31, 0x58,
0x16, 0x16, 0x3c, 0xaf, 0x12, 0x07, 0xbc, 0x20, 0x2c, 0x78, 0x2e, 0x48, 0x44, 0x2f, 0x9e, 0x85,
0xa1, 0xba, 0xf5, 0x3a, 0xa5, 0x0d, 0xda, 0x10, 0x07, 0xb7, 0x83, 0x82, 0xb4, 0x58, 0xf3, 0x3b,
0x48, 0x48, 0xc3, 0x80, 0xd7, 0x35, 0x9d, 0xbd, 0x04, 0x8f, 0xc8, 0xc0, 0x4b, 0xbc, 0x95, 0x88,
0x5e, 0xf5, 0xaf, 0x39, 0x34, 0x21, 0xf9, 0x1f, 0xfe, 0x92, 0xc2, 0x2e, 0x12, 0x9c, 0xfa, 0x06,
0x6f, 0xf6, 0x27, 0x72, 0x65, 0x27, 0x3e, 0x5d, 0x5e, 0x09, 0xf1, 0xbc, 0xa7, 0xba, 0xc8, 0xbd,
0x44, 0xd0, 0x43, 0xa2, 0x62, 0xf1, 0xeb, 0x90, 0xe0, 0xf2, 0xef, 0xc5, 0xed, 0x0e, 0xcb, 0x1c,
0x22, 0x4f, 0x88, 0xe7, 0x76, 0xa2, 0x0b, 0xa1, 0x2f, 0xbb, 0x70, 0x52, 0xe6, 0xf7, 0xd1, 0x41,
0xa2, 0xbb, 0x12, 0x13, 0x44, 0x7a, 0x44, 0xcf, 0x9c, 0x45, 0x53, 0xf1, 0x51, 0x64, 0x7a, 0xb2,
0xfb, 0xb6, 0x82, 0x4a, 0x69, 0x8a, 0xb0, 0x53, 0x6c, 0x00, 0x14, 0x66, 0x87, 0x4f, 0xd2, 0xae,
0x87, 0xba, 0x88, 0x0a, 0x66, 0x87, 0xdd, 0x62, 0x88, 0x17, 0xbb, 0x62, 0xe5, 0x5e, 0x7f, 0x55,
0x5e, 0x12, 0xed, 0xb0, 0xf7, 0x1e, 0x96, 0xe0, 0xfd, 0x0e, 0x12, 0xb0, 0x62, 0x15, 0x8d, 0x70,
0x7d, 0xbc, 0x2c, 0xb3, 0x58, 0x41, 0xcc, 0x1f, 0x78, 0x7e, 0x0d, 0x5b, 0xb1, 0xd7, 0xc3, 0x0b,
0x56, 0x2e, 0x52, 0xe7, 0x15, 0xd3, 0xda, 0xac, 0x9a, 0x86, 0x5e, 0xef, 0xee, 0x61, 0x46, 0xb5,
0x26, 0x65, 0x54, 0x8f, 0x67, 0x9c, 0x55, 0x49, 0xcb, 0xb4, 0xdc, 0x4a, 0xfd, 0x0b, 0x98, 0x5d,
0xa2, 0x8c, 0x1e, 0xb3, 0x28, 0x1a, 0x66, 0x8f, 0x07, 0xbe, 0x8f, 0xef, 0x48, 0x03, 0x76, 0x26,
0x8d, 0xdc, 0x52, 0x33, 0x58, 0xe2, 0xa1, 0xb3, 0x71, 0xae, 0x5b, 0x66, 0x4b, 0x78, 0xef, 0xce,
0xa4, 0x50, 0x6a, 0x85, 0xe3, 0x5c, 0x02, 0x54, 0xc2, 0xb1, 0xd5, 0xdf, 0x2a, 0xe8, 0xa0, 0x44,
0xb9, 0x1f, 0x69, 0x81, 0x26, 0xa7, 0x05, 0x8f, 0xec, 0x64, 0x64, 0x29, 0x09, 0xc2, 0x57, 0x72,
0xb1, 0x71, 0x31, 0x0b, 0xc0, 0x56, 0x33, 0xd6, 0x31, 0x1b, 0xb5, 0xdd, 0xac, 0x3b, 0xf2, 0x12,
0x8d, 0x10, 0x94, 0x44, 0x25, 0xe0, 0x2f, 0x80, 0x79, 0x59, 0x6d, 0x83, 0xdd, 0xd1, 0xea, 0xb4,
0xb6, 0x9b, 0xaf, 0xe7, 0x87, 0xd9, 0xf3, 0xe1, 0xc5, 0x38, 0x34, 0xe9, 0x95, 0xa6, 0xfe, 0x30,
0x3e, 0xc5, 0xcc, 0xc9, 0xf0, 0x53, 0xa8, 0xc0, 0x0b, 0x4d, 0xeb, 0xa6, 0x21, 0xe2, 0xc7, 0x03,
0x6c, 0xb6, 0xaa, 0xa2, 0x0d, 0xe2, 0xc2, 0xff, 0xf4, 0xcd, 0xc9, 0x7d, 0x42, 0x12, 0xc0, 0xe0,
0x65, 0x34, 0xd4, 0x19, 0xfc, 0xe6, 0x86, 0x1f, 0xd5, 0xf9, 0x75, 0x0d, 0x47, 0x51, 0xff, 0x11,
0x57, 0x9b, 0x27, 0x7b, 0xf6, 0xee, 0xcf, 0x60, 0xb0, 0xa7, 0xa4, 0xce, 0xa2, 0x85, 0x46, 0xc5,
0x05, 0xc6, 0x80, 0x3b, 0x49, 0x5a, 0x24, 0x09, 0xb3, 0x74, 0xbf, 0xd1, 0x17, 0xc4, 0x17, 0x26,
0x57, 0xa8, 0xee, 0x5a, 0xba, 0xd3, 0xdd, 0xf3, 0xa0, 0xba, 0x2e, 0x05, 0xd5, 0x85, 0x8c, 0x03,
0xec, 0xd1, 0x34, 0x35, 0xb0, 0xfe, 0x51, 0x41, 0x87, 0x7b, 0xa8, 0xf7, 0x23, 0xe8, 0x50, 0x39,
0xe8, 0x3c, 0xbe, 0xd3, 0x11, 0xa6, 0x04, 0x9e, 0x6b, 0x28, 0x61, 0x7c, 0xdc, 0x75, 0x4f, 0x21,
0xd4, 0xb1, 0xf4, 0x2d, 0x48, 0x9f, 0x9a, 0xa2, 0x18, 0xb0, 0x10, 0xce, 0x49, 0x35, 0xe8, 0x21,
0x11, 0x2a, 0xfc, 0x39, 0x56, 0xa8, 0xb7, 0xae, 0xb9, 0x86, 0x33, 0xd7, 0x68, 0xcc, 0x6b, 0x1d,
0x6d, 0x4d, 0x37, 0x20, 0x8b, 0x15, 0xaf, 0xc6, 0xc5, 0xca, 0xa2, 0x57, 0xa4, 0x97, 0x44, 0x01,
0x2b, 0xf8, 0xee, 0xfe, 0xa7, 0x6a, 0x9f, 0xb8, 0x4b, 0x52, 0x84, 0xe0, 0x2f, 0xc2, 0x2e, 0x68,
0x79, 0xf9, 0x46, 0x63, 0xc1, 0x32, 0x3b, 0x92, 0x06, 0x5e, 0x32, 0x70, 0x0e, 0x34, 0x28, 0x91,
0x14, 0x9a, 0x2c, 0x3a, 0xa4, 0x0a, 0xc2, 0x0e, 0x3a, 0x04, 0x27, 0x37, 0xf3, 0x15, 0x2a, 0x5b,
0x60, 0x88, 0xcb, 0xaf, 0xb0, 0xda, 0x87, 0xb9, 0xde, 0xee, 0x2c, 0xa2, 0x93, 0xe0, 0x21, 0x75,
0x1e, 0xdd, 0x32, 0x0d, 0x17, 0xa2, 0x29, 0x24, 0xce, 0x4c, 0x12, 0x8b, 0xb8, 0xa3, 0x57, 0xbc,
0xa6, 0x4f, 0x58, 0x46, 0x5c, 0xe3, 0x57, 0x1c, 0x3e, 0x15, 0xbb, 0xf6, 0x66, 0xf7, 0x50, 0x62,
0xad, 0xf3, 0xfc, 0xb9, 0x10, 0x06, 0x97, 0xf3, 0x61, 0x17, 0x89, 0xd2, 0xe1, 0x16, 0x2a, 0x6e,
0x88, 0x27, 0x10, 0xbb, 0x34, 0x3a, 0xd0, 0x86, 0x28, 0x3d, 0xa1, 0x84, 0x09, 0xbe, 0xdf, 0x6c,
0x93, 0x50, 0x02, 0xbb, 0x28, 0xe0, 0x1f, 0x17, 0x16, 0x78, 0x4d, 0x4f, 0x21, 0x0c, 0x41, 0xe7,
0xbd, 0x66, 0xe2, 0xf7, 0xfb, 0xa4, 0x17, 0xaa, 0xf3, 0xa5, 0x62, 0x2f, 0x29, 0x34, 0x13, 0xbf,
0x1f, 0x77, 0xd0, 0xa8, 0x4d, 0x97, 0xf5, 0xb6, 0xbb, 0x5d, 0x42, 0x7c, 0xe9, 0x2e, 0x66, 0x7d,
0xe9, 0x5c, 0xe4, 0xdc, 0xb1, 0xf2, 0x8a, 0x50, 0xa2, 0xe8, 0x27, 0xbe, 0x18, 0xbc, 0x8d, 0x8a,
0x96, 0xdb, 0x9e, 0xb3, 0x2f, 0xdb, 0xd4, 0x2a, 0x8d, 0x71, 0x99, 0x59, 0xa3, 0x32, 0xf1, 0xf9,
0xe3, 0x52, 0x03, 0x0b, 0x06, 0x14, 0x24, 0x14, 0x86, 0xbf, 0xa1, 0x20, 0x6c, 0xbb, 0x1d, 0x38,
0x8e, 0xb1, 0x1c, 0x5c, 0x33, 0x78, 0x85, 0x87, 0x5d, 0x1a, 0xe7, 0x3a, 0x54, 0x33, 0xbf, 0xf0,
0xc6, 0x81, 0xe2, 0xca, 0x04, 0x37, 0x46, 0xbd, 0xa4, 0x24, 0x41, 0x0f, 0x36, 0x15, 0xeb, 0x36,
0xff, 0x7f, 0x69, 0x62, 0xa0, 0xa9, 0x48, 0xae, 0x74, 0x09, 0xa7, 0x42, 0xf4, 0x13, 0x5f, 0x0c,
0xab, 0x26, 0xb6, 0xa8, 0xd6, 0xb8, 0xd4, 0x36, 0xba, 0xc4, 0x34, 0x9d, 0x25, 0x88, 0x5d, 0x76,
0xd7, 0x86, 0x68, 0x58, 0x9a, 0xe4, 0x6e, 0x13, 0x54, 0x13, 0x93, 0x44, 0x2a, 0x92, 0xc2, 0xcd,
0xab, 0x89, 0xc5, 0xc3, 0xe4, 0xde, 0xfe, 0x6a, 0x60, 0x67, 0xd5, 0xc4, 0xa1, 0x8a, 0x7b, 0x56,
0x4d, 0x1c, 0x11, 0xd1, 0xff, 0xc2, 0xf6, 0xc3, 0x3c, 0x3a, 0x14, 0x12, 0x87, 0xf7, 0xb6, 0x0f,
0x4b, 0xf7, 0xb6, 0x77, 0xc7, 0xee, 0x6d, 0x8f, 0x26, 0xb0, 0xfc, 0xf7, 0xfa, 0xf6, 0x5f, 0xf4,
0xfa, 0x96, 0x15, 0x8c, 0x87, 0xf3, 0xf5, 0x6f, 0x50, 0x30, 0x1e, 0x2a, 0x9b, 0x92, 0x32, 0xbd,
0x9b, 0x8b, 0x8e, 0xe8, 0x3f, 0xb1, 0x2a, 0x39, 0xa1, 0x48, 0x78, 0x28, 0x5b, 0x91, 0xb0, 0xfa,
0x87, 0x3c, 0x9a, 0x8a, 0x87, 0x09, 0xa9, 0xe6, 0x55, 0xb9, 0x6e, 0xcd, 0x6b, 0x15, 0x4d, 0xaf,
0xbb, 0x86, 0xd1, 0xe5, 0x06, 0x89, 0xd4, 0x90, 0x78, 0xd7, 0xe3, 0xb7, 0x0a, 0xce, 0xe9, 0xa5,
0x04, 0x1a, 0x92, 0xc8, 0x99, 0x52, 0xbf, 0x9b, 0x1f, 0xa8, 0x7e, 0xf7, 0x61, 0x34, 0xc1, 0xf6,
0x89, 0x6e, 0xac, 0xb4, 0x25, 0x08, 0x00, 0x24, 0xda, 0x49, 0x64, 0xda, 0xe4, 0x5a, 0xdc, 0xe1,
0x01, 0x6a, 0x71, 0xb7, 0xa4, 0xcb, 0xea, 0x11, 0xbe, 0x20, 0x2a, 0x03, 0x2f, 0x88, 0x1b, 0xbe,
0xb3, 0x56, 0x6f, 0x45, 0x33, 0x82, 0x8d, 0x7d, 0x03, 0x91, 0xc3, 0x0a, 0x60, 0xa9, 0xb5, 0xe0,
0xb6, 0x5a, 0x5d, 0xf5, 0x2c, 0x2c, 0x20, 0xa9, 0xd4, 0xda, 0x9b, 0x79, 0xaf, 0xfa, 0x5b, 0xd4,
0xc0, 0x44, 0x66, 0xde, 0x6b, 0x27, 0x01, 0x85, 0xfa, 0x81, 0x82, 0x8e, 0xa6, 0x14, 0xdf, 0xe2,
0xab, 0x68, 0xb2, 0xa5, 0x6d, 0x47, 0xaa, 0x8b, 0x45, 0x88, 0xc9, 0x7a, 0xbe, 0xe7, 0xd7, 0xda,
0x2b, 0x12, 0x12, 0x89, 0x21, 0xf3, 0x2d, 0x5e, 0xdb, 0xae, 0xb9, 0x56, 0x93, 0x0e, 0x78, 0x8b,
0xc0, 0x97, 0xef, 0x8a, 0xc0, 0x20, 0x01, 0x1a, 0x2b, 0xe1, 0x2d, 0xa5, 0xe5, 0x7b, 0x90, 0xb9,
0x47, 0x8b, 0x78, 0xef, 0x88, 0x15, 0xf1, 0x1e, 0xec, 0xe1, 0xdb, 0xa7, 0x12, 0xde, 0x77, 0x14,
0x74, 0x24, 0x39, 0x2f, 0xc6, 0xff, 0x2f, 0x69, 0x7c, 0x3c, 0xa6, 0xf1, 0x81, 0x18, 0x97, 0xd0,
0x77, 0x03, 0x4d, 0x8a, 0xec, 0x59, 0xc0, 0xdc, 0xc0, 0x2f, 0x4d, 0xb7, 0x82, 0xd4, 0xdc, 0xcf,
0x03, 0xf9, 0x3c, 0xca, 0x6d, 0x24, 0x86, 0xab, 0x7e, 0x2b, 0x87, 0x86, 0x79, 0x65, 0xdb, 0x1e,
0x26, 0x6d, 0xcf, 0x4a, 0x49, 0x5b, 0xd6, 0xb7, 0x6f, 0xae, 0x5d, 0x6a, 0xbe, 0xb6, 0x16, 0xcb,
0xd7, 0xce, 0x0c, 0x84, 0xde, 0x3f, 0x55, 0x7b, 0x08, 0x15, 0x03, 0x25, 0xb2, 0x05, 0x6a, 0x96,
0x18, 0x8f, 0x45, 0x44, 0x64, 0x0c, 0xf3, 0x5b, 0xd2, 0x8e, 0x39, 0xc8, 0x4f, 0xa2, 0x23, 0xb2,
0xcb, 0xfe, 0x56, 0xe9, 0xbd, 0xcf, 0x84, 0xb5, 0xa9, 0xbd, 0x3b, 0x28, 0x04, 0x29, 0xef, 0x77,
0xe5, 0xc1, 0xed, 0x5d, 0x9e, 0x7b, 0xef, 0x11, 0xc1, 0x33, 0xb9, 0x2a, 0xf5, 0x92, 0x18, 0xf5,
0x0c, 0x6c, 0x00, 0x92, 0xb0, 0x4c, 0xcf, 0x28, 0x3f, 0x55, 0xd0, 0x74, 0x52, 0x35, 0x2d, 0x2b,
0x99, 0xda, 0xd4, 0x45, 0xf9, 0x4f, 0xa4, 0x64, 0xea, 0x49, 0x68, 0x23, 0xbc, 0x27, 0xf8, 0xe1,
0x5a, 0x2e, 0xf5, 0x87, 0x6b, 0xa7, 0x10, 0x02, 0x53, 0x89, 0xdf, 0xea, 0x8b, 0x51, 0x05, 0xce,
0x1b, 0xfe, 0x8a, 0x9f, 0x44, 0xa8, 0x78, 0x91, 0x5c, 0xa8, 0x0f, 0xdf, 0xcc, 0xa2, 0x45, 0x72,
0x11, 0x55, 0xa3, 0x74, 0xea, 0xcf, 0x14, 0x74, 0xc7, 0x75, 0xcf, 0x8c, 0xb8, 0x22, 0x85, 0x87,
0x72, 0x2c, 0x3c, 0x1c, 0x4b, 0x07, 0xd8, 0xc7, 0x1f, 0x28, 0xbc, 0x91, 0x43, 0x78, 0x75, 0x43,
0xb7, 0x1a, 0x55, 0xcd, 0x72, 0x60, 0xa7, 0xf6, 0x06, 0xb8, 0x87, 0x01, 0x03, 0x2c, 0xde, 0xa0,
0x76, 0xdd, 0xd2, 0xb9, 0x91, 0xc4, 0x74, 0x06, 0x16, 0x5f, 0x08, 0xbb, 0x48, 0x94, 0x0e, 0xce,
0x6e, 0x05, 0x91, 0x2f, 0xfb, 0x55, 0x56, 0x59, 0x13, 0xe0, 0xd0, 0x03, 0xc2, 0xf5, 0x21, 0x1a,
0x60, 0x5d, 0xfa, 0xe0, 0xea, 0x9b, 0x10, 0xee, 0x7b, 0x0d, 0xb2, 0xe0, 0xd5, 0x10, 0xed, 0x95,
0x51, 0x6e, 0x45, 0x43, 0x1c, 0x95, 0x59, 0x63, 0xdc, 0xbb, 0x83, 0x67, 0x12, 0x09, 0x6f, 0x55,
0x3f, 0x56, 0xd0, 0x4c, 0xb2, 0x4a, 0xfb, 0x71, 0xee, 0xb8, 0x2a, 0x9f, 0x3b, 0xb2, 0xde, 0x63,
0x24, 0x2b, 0x9e, 0x72, 0x06, 0xf9, 0x20, 0xd1, 0xf8, 0xfb, 0x31, 0xca, 0x75, 0x79, 0x94, 0x73,
0x3b, 0x1e, 0x65, 0xf2, 0x08, 0x2b, 0xf7, 0x5e, 0xfb, 0xd3, 0xb1, 0x9b, 0xde, 0x83, 0xbf, 0xdf,
0xc3, 0xdf, 0x6b, 0x1f, 0x1d, 0x53, 0xae, 0xc1, 0xdf, 0x7b, 0xf0, 0xf7, 0x21, 0xfc, 0xbd, 0xf9,
0xe7, 0x63, 0x37, 0x3d, 0x3b, 0x2a, 0x30, 0xff, 0x19, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x50, 0x26,
0x17, 0x14, 0x46, 0x00, 0x00,
// 3916 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x5b, 0x5b, 0x8f, 0x1c, 0xc7,
0x75, 0xd6, 0xcc, 0xec, 0x65, 0xa6, 0xf6, 0xc2, 0x65, 0x71, 0x49, 0x8e, 0x56, 0x12, 0x29, 0xb5,
0x10, 0x5d, 0x10, 0x71, 0x36, 0x64, 0x24, 0x85, 0xa2, 0x24, 0x4a, 0x3b, 0x7b, 0x21, 0x29, 0xed,
0x92, 0xa3, 0x9a, 0x25, 0xa5, 0xe8, 0x9a, 0xde, 0x99, 0xda, 0xd9, 0xe6, 0xf6, 0x4c, 0x8f, 0xfa,
0xb2, 0xda, 0x55, 0x10, 0x44, 0x41, 0x12, 0x20, 0x2f, 0x51, 0xf4, 0x16, 0x01, 0x89, 0x1f, 0x0c,
0xd8, 0xf0, 0x93, 0x05, 0x1b, 0x30, 0xa0, 0x07, 0xbf, 0xd8, 0x82, 0x0d, 0xd3, 0x0f, 0x86, 0x65,
0xc3, 0x86, 0xfd, 0x60, 0x4b, 0xb2, 0x0c, 0x5b, 0xf0, 0x5f, 0x90, 0xfd, 0xe0, 0x53, 0x97, 0xbe,
0x54, 0x4f, 0xf7, 0x90, 0x3d, 0x7b, 0x81, 0x0d, 0xfb, 0x61, 0x41, 0x76, 0xd5, 0x39, 0xdf, 0x39,
0x75, 0xea, 0xd4, 0xa9, 0x53, 0x55, 0x67, 0xd0, 0x93, 0x9b, 0x67, 0x9d, 0x8a, 0x61, 0xcd, 0x6e,
0x7a, 0x6b, 0xd4, 0xee, 0x50, 0x97, 0x3a, 0xb3, 0xdd, 0xcd, 0xd6, 0xac, 0xde, 0x35, 0x9c, 0x59,
0xba, 0xed, 0xd2, 0x8e, 0x63, 0x58, 0x1d, 0x67, 0x76, 0xeb, 0xf4, 0x1a, 0x75, 0xf5, 0xd3, 0xb3,
0x2d, 0xda, 0xa1, 0xb6, 0xee, 0xd2, 0x66, 0xa5, 0x6b, 0x5b, 0xae, 0x85, 0x4f, 0x09, 0xf6, 0x4a,
0xc8, 0x5e, 0x01, 0xf6, 0x0a, 0x63, 0xaf, 0x84, 0xec, 0x15, 0xc9, 0x3e, 0x73, 0xaa, 0x65, 0xb8,
0x1b, 0xde, 0x5a, 0xa5, 0x61, 0xb5, 0x67, 0x5b, 0x56, 0xcb, 0x9a, 0xe5, 0x28, 0x6b, 0xde, 0x3a,
0xff, 0xe2, 0x1f, 0xfc, 0x7f, 0x02, 0x7d, 0xe6, 0x4c, 0xaa, 0x72, 0xb3, 0x36, 0x75, 0x2c, 0xcf,
0x6e, 0xd0, 0xb8, 0x46, 0x33, 0x8f, 0xa4, 0xf3, 0x78, 0x9d, 0x2d, 0x6a, 0x33, 0x85, 0x68, 0xb3,
0x87, 0xed, 0xa1, 0x74, 0xb6, 0xad, 0x9e, 0x61, 0xcf, 0x9c, 0x4a, 0xa6, 0xb6, 0xbd, 0x8e, 0x6b,
0xb4, 0x7b, 0x75, 0x3a, 0x9d, 0x4c, 0xee, 0xb9, 0x86, 0x39, 0x6b, 0x74, 0x5c, 0xc7, 0xb5, 0xe3,
0x2c, 0x5a, 0x05, 0xa1, 0xb9, 0xda, 0xa5, 0x6b, 0x42, 0x5f, 0x7c, 0x37, 0x1a, 0xea, 0xe8, 0x6d,
0x5a, 0xce, 0xdd, 0x9d, 0x7b, 0xa0, 0x54, 0x1d, 0xbf, 0xf1, 0xd1, 0xc9, 0xdb, 0x3e, 0xfd, 0xe8,
0xe4, 0xd0, 0x65, 0x68, 0x23, 0xbc, 0x47, 0x7b, 0x19, 0x4d, 0xcf, 0xd7, 0xae, 0xae, 0xea, 0x76,
0x8b, 0xba, 0x57, 0x01, 0xd7, 0x78, 0x53, 0x77, 0x19, 0xe7, 0x02, 0x9a, 0x72, 0x79, 0x63, 0x8d,
0x82, 0xb5, 0x3a, 0xae, 0xde, 0x12, 0x28, 0xc3, 0xd5, 0xb2, 0x44, 0x99, 0x5a, 0x8d, 0xf5, 0x93,
0x1e, 0x0e, 0xed, 0x7f, 0x73, 0xe8, 0xf6, 0x79, 0xcf, 0x71, 0xad, 0xf6, 0x0a, 0x75, 0x6d, 0xa3,
0x31, 0xef, 0xd9, 0x36, 0x74, 0xd5, 0x5d, 0xdd, 0xf5, 0x9c, 0x9b, 0x6b, 0x87, 0x5f, 0x40, 0xc3,
0x5b, 0xba, 0xe9, 0xd1, 0x72, 0x1e, 0x48, 0xc6, 0xce, 0x3c, 0x54, 0x49, 0x75, 0x9b, 0x8a, 0x3f,
0xb1, 0x95, 0xe7, 0x3c, 0x1d, 0xac, 0xe9, 0xee, 0x54, 0xa7, 0x25, 0xe0, 0xb8, 0x94, 0x7a, 0x8d,
0x21, 0x11, 0x01, 0xa8, 0xbd, 0x9d, 0x43, 0x77, 0xa5, 0x6a, 0xb6, 0x6c, 0x38, 0x2e, 0x6e, 0xa3,
0x61, 0xc3, 0xa5, 0x6d, 0x07, 0xd4, 0x2b, 0x80, 0xec, 0x8b, 0x95, 0x4c, 0x2e, 0x5b, 0x49, 0x05,
0xaf, 0x4e, 0x48, 0xbd, 0x86, 0x2f, 0x31, 0x78, 0x22, 0xa4, 0x68, 0xff, 0x93, 0x43, 0x38, 0xca,
0x23, 0xac, 0x7b, 0x0b, 0x36, 0x7a, 0x7e, 0x37, 0x36, 0x3a, 0x22, 0x01, 0xc7, 0x84, 0x38, 0xc5,
0x44, 0x6f, 0xe5, 0xd0, 0xb1, 0x5e, 0x8d, 0xb8, 0x6d, 0xd6, 0x55, 0xdb, 0xcc, 0xed, 0xc2, 0x36,
0x02, 0x35, 0xc5, 0x28, 0x5f, 0xcd, 0xa3, 0xd2, 0x82, 0x4e, 0xdb, 0x56, 0xa7, 0x0e, 0xb6, 0x78,
0x01, 0x15, 0xdb, 0xc0, 0xdf, 0xd4, 0x5d, 0x9d, 0xdb, 0x63, 0xec, 0xcc, 0x03, 0x7d, 0x06, 0xbb,
0x75, 0xba, 0x72, 0x65, 0xed, 0x3a, 0x6d, 0xb8, 0x20, 0x47, 0xaf, 0x62, 0x89, 0x8f, 0xc2, 0x36,
0x12, 0xa0, 0xe1, 0x57, 0xd1, 0x90, 0xd3, 0xa5, 0x0d, 0x69, 0xc2, 0x27, 0x32, 0x0e, 0x27, 0xd0,
0xb0, 0x0e, 0x18, 0xe1, 0x1c, 0xb1, 0x2f, 0xc2, 0x71, 0xc1, 0x5e, 0x23, 0x0e, 0x9f, 0xfc, 0x72,
0x81, 0x4b, 0x38, 0x3f, 0xb0, 0x04, 0xe1, 0x42, 0x93, 0x52, 0xc6, 0x88, 0xf8, 0x26, 0x12, 0x5d,
0xfb, 0x41, 0x0e, 0x4d, 0x04, 0xb4, 0x7c, 0xa6, 0x5e, 0xe9, 0xb1, 0xd9, 0x6c, 0x1f, 0x9b, 0x45,
0x22, 0x5d, 0x85, 0xb1, 0x73, 0xd3, 0x4d, 0x49, 0x61, 0x45, 0xbf, 0x25, 0x62, 0xb8, 0x57, 0x7c,
0x47, 0xc8, 0x73, 0x47, 0x38, 0x3b, 0xe8, 0xb8, 0x52, 0xe6, 0xff, 0x27, 0xd1, 0xf1, 0xd4, 0x85,
0x25, 0x8b, 0x0e, 0x35, 0x61, 0x06, 0x2d, 0x5b, 0x8e, 0x27, 0xeb, 0x6c, 0x2d, 0xeb, 0x6b, 0xd4,
0xac, 0x4b, 0x8c, 0xea, 0x38, 0x1b, 0x98, 0xff, 0x45, 0x02, 0x6c, 0xfc, 0x12, 0x2a, 0x82, 0x06,
0x5d, 0x13, 0x42, 0xab, 0xf4, 0x8a, 0x53, 0xfd, 0x7d, 0xad, 0x66, 0x35, 0x57, 0x25, 0x03, 0x77,
0x83, 0xc0, 0x6a, 0x7e, 0x2b, 0x09, 0x00, 0xb5, 0xef, 0xe4, 0xd1, 0xa1, 0xd8, 0x94, 0xe2, 0x6b,
0xe8, 0x58, 0x43, 0x84, 0x89, 0xcb, 0x5e, 0x1b, 0x04, 0xd4, 0x1b, 0x1b, 0xb4, 0xe9, 0x99, 0xb4,
0x29, 0xc3, 0xee, 0x09, 0x89, 0x77, 0x6c, 0x3e, 0x91, 0x8a, 0xa4, 0x70, 0xe3, 0x67, 0x10, 0xee,
0xf0, 0xa6, 0x15, 0xc3, 0x71, 0x02, 0xcc, 0x3c, 0xc7, 0x9c, 0x91, 0x98, 0xf8, 0x72, 0x0f, 0x05,
0x49, 0xe0, 0x62, 0x3a, 0x36, 0xa9, 0x63, 0xd8, 0xb4, 0x19, 0xd7, 0xb1, 0xa0, 0xea, 0xb8, 0x90,
0x48, 0x45, 0x52, 0xb8, 0xf1, 0x23, 0x68, 0x4c, 0x48, 0x23, 0x54, 0x6f, 0xee, 0x94, 0x87, 0x38,
0x58, 0x10, 0x9a, 0x2e, 0x87, 0x5d, 0x24, 0x4a, 0xa7, 0x7d, 0x2d, 0x8f, 0xd0, 0x02, 0xed, 0x9a,
0xd6, 0x4e, 0x1b, 0x06, 0xbe, 0x8f, 0xe1, 0xe1, 0x35, 0x25, 0x3c, 0x3c, 0x99, 0xd5, 0xc9, 0x03,
0x15, 0x53, 0xe3, 0x43, 0x2b, 0x16, 0x1f, 0x9e, 0x1a, 0x5c, 0x44, 0xff, 0x00, 0xf1, 0xc3, 0x1c,
0x9a, 0x0c, 0x89, 0x0f, 0x22, 0x42, 0xbc, 0xaa, 0x46, 0x88, 0xc7, 0x06, 0x1e, 0x59, 0x4a, 0x88,
0x78, 0xb7, 0x80, 0x70, 0x48, 0x44, 0x2c, 0xd3, 0x5c, 0xd3, 0x1b, 0x9b, 0xb7, 0xb0, 0x6f, 0x7e,
0x19, 0x36, 0x5c, 0xaf, 0xdb, 0x64, 0xb9, 0xd3, 0x5c, 0xa7, 0x63, 0xb9, 0x3c, 0xef, 0xf1, 0xd5,
0xfc, 0xc7, 0x81, 0xd5, 0xf4, 0x35, 0xa8, 0x5c, 0xed, 0xc1, 0x5e, 0xec, 0xb8, 0xf6, 0x4e, 0xb8,
0xe8, 0x7a, 0x09, 0x48, 0x82, 0x42, 0xf8, 0x75, 0x84, 0x6c, 0x89, 0xb9, 0x6a, 0x49, 0xff, 0xc8,
0xea, 0x82, 0xbe, 0x52, 0xf3, 0x56, 0x67, 0xdd, 0x68, 0x85, 0xde, 0x4e, 0x02, 0x60, 0x12, 0x11,
0x32, 0xb3, 0x88, 0x8e, 0xa7, 0x68, 0x8f, 0xa7, 0x50, 0x61, 0x93, 0xee, 0x08, 0xb3, 0x12, 0xf6,
0x5f, 0x3c, 0x1d, 0xcd, 0x3f, 0x4a, 0x32, 0x79, 0x38, 0x97, 0x3f, 0x9b, 0xd3, 0xde, 0x1d, 0x8e,
0x3a, 0x1b, 0x0f, 0xdf, 0x0f, 0xa0, 0xa2, 0x0d, 0x2d, 0x46, 0x43, 0x77, 0x64, 0x5c, 0xe3, 0x01,
0x98, 0xc8, 0x36, 0x12, 0xf4, 0x2a, 0x81, 0x3e, 0x7f, 0x40, 0x81, 0xbe, 0xb0, 0xc7, 0x81, 0x1e,
0x5b, 0x30, 0x08, 0x97, 0xa5, 0xe7, 0x2d, 0x11, 0xd5, 0xb2, 0xa7, 0x4a, 0xd1, 0x95, 0x2d, 0x80,
0x42, 0x81, 0x7e, 0x0b, 0x09, 0x84, 0xe0, 0x39, 0x74, 0xa8, 0x6d, 0x74, 0x78, 0x78, 0xac, 0xd3,
0x86, 0xd5, 0x69, 0x3a, 0xe5, 0x61, 0x6e, 0xe6, 0xe3, 0x92, 0xe9, 0xd0, 0x8a, 0xda, 0x4d, 0xe2,
0xf4, 0x78, 0x19, 0x4d, 0xdb, 0x74, 0xcb, 0x60, 0x6a, 0x5c, 0x84, 0xe5, 0x6c, 0xd9, 0x3b, 0xcb,
0x46, 0xdb, 0x70, 0xcb, 0x23, 0x22, 0xfb, 0x07, 0x8c, 0x69, 0x92, 0xd0, 0x4f, 0x12, 0xb9, 0xf0,
0x7d, 0x68, 0xa4, 0xab, 0x7b, 0x0e, 0x6c, 0x11, 0xa3, 0xc0, 0x5f, 0x0c, 0x03, 0x53, 0x8d, 0xb7,
0x12, 0xd9, 0x0b, 0xd9, 0x76, 0xd4, 0xcb, 0x8b, 0x7b, 0xe1, 0xe5, 0x93, 0xe9, 0x1e, 0xae, 0x7d,
0x96, 0x47, 0x53, 0xf1, 0xa0, 0xc9, 0xb6, 0x4a, 0x6b, 0xcd, 0xa1, 0xf6, 0x16, 0x6d, 0x5e, 0x10,
0xc7, 0x2a, 0x80, 0xe7, 0x6e, 0x5a, 0x08, 0x57, 0xed, 0x95, 0x1e, 0x0a, 0x92, 0xc0, 0x85, 0x1f,
0x8a, 0x38, 0xba, 0xd8, 0x6c, 0x83, 0x69, 0x4b, 0x70, 0x76, 0x98, 0x36, 0xb9, 0xf2, 0xfd, 0x4e,
0xb9, 0xa3, 0x06, 0xd3, 0x76, 0x55, 0xed, 0x26, 0x71, 0x7a, 0x7c, 0x01, 0x1d, 0xd6, 0xb7, 0x74,
0xc3, 0xd4, 0xd7, 0x4c, 0x1a, 0x80, 0x88, 0x9d, 0xf4, 0x76, 0x09, 0x72, 0x78, 0x2e, 0x4e, 0x40,
0x7a, 0x79, 0xf0, 0x0a, 0x3a, 0xe2, 0x75, 0x7a, 0xa1, 0x84, 0x1b, 0xdd, 0x21, 0xa1, 0x8e, 0x5c,
0xed, 0x25, 0x21, 0x49, 0x7c, 0xda, 0x8f, 0x72, 0xd1, 0xf8, 0xec, 0xbb, 0x2c, 0x3e, 0x87, 0x86,
0xdc, 0x9d, 0xae, 0x1f, 0x9f, 0xef, 0xf3, 0xe3, 0xf3, 0x2a, 0xb4, 0x7d, 0xce, 0x13, 0x88, 0x38,
0x07, 0xeb, 0x21, 0x9c, 0x07, 0xff, 0x2b, 0x9a, 0x60, 0x53, 0x69, 0x74, 0x5a, 0xc2, 0x2a, 0x32,
0x3e, 0x2c, 0x0d, 0xe0, 0x2e, 0x01, 0x46, 0x64, 0x9f, 0x39, 0x0c, 0x8a, 0x4c, 0x28, 0x9d, 0x44,
0x95, 0x07, 0x87, 0xe6, 0x89, 0xc5, 0xed, 0xae, 0x65, 0xbb, 0x57, 0xba, 0x22, 0x46, 0x83, 0x97,
0x53, 0xde, 0xc0, 0xc7, 0x13, 0xf1, 0x72, 0x41, 0x46, 0x64, 0x2f, 0xbe, 0x17, 0x0d, 0xd3, 0x6d,
0xbd, 0xe1, 0x72, 0x8d, 0x8b, 0xe1, 0x8e, 0xb6, 0xc8, 0x1a, 0x89, 0xe8, 0xd3, 0xbe, 0x0e, 0xe7,
0xae, 0xa5, 0xfa, 0x05, 0xdb, 0xf2, 0xba, 0xfe, 0xe0, 0x7d, 0x39, 0xff, 0x80, 0x86, 0x6c, 0xc8,
0x98, 0xa4, 0xd5, 0xee, 0xf5, 0xad, 0x46, 0xa0, 0x0d, 0xac, 0x76, 0x24, 0xc6, 0x25, 0x4c, 0xc6,
0x18, 0x60, 0x17, 0x1e, 0xb1, 0xf5, 0x4e, 0x8b, 0xfa, 0xfb, 0xdb, 0xa3, 0x19, 0x6d, 0x75, 0x69,
0x81, 0x30, 0xf6, 0x70, 0x60, 0xfc, 0x13, 0xf2, 0x0a, 0x81, 0xaa, 0x7d, 0x21, 0x87, 0x0e, 0x5d,
0x5c, 0x5d, 0xad, 0x5d, 0xea, 0xb4, 0xe0, 0x90, 0xe9, 0xd4, 0x74, 0x77, 0x83, 0x6d, 0xc1, 0x5d,
0xf8, 0x37, 0xbe, 0x05, 0xb3, 0x3e, 0xc2, 0x7b, 0xf0, 0x06, 0x1a, 0x65, 0xeb, 0x91, 0x76, 0x9a,
0x03, 0xa6, 0x56, 0x52, 0x5c, 0x55, 0x80, 0x54, 0x0f, 0x49, 0x19, 0xa3, 0xb2, 0x81, 0xf8, 0xf0,
0xda, 0x3f, 0xa3, 0xe9, 0x88, 0x7a, 0xcc, 0x5e, 0xfc, 0xa8, 0x8b, 0x1b, 0x68, 0x98, 0x69, 0xe2,
0x1f, 0x64, 0xb3, 0x9e, 0xcb, 0x62, 0x43, 0x0e, 0x27, 0x94, 0x7d, 0x41, 0x8a, 0xc2, 0xb1, 0xb5,
0x9f, 0xe5, 0xd1, 0xf1, 0x8b, 0x96, 0x6d, 0xbc, 0x69, 0x75, 0x5c, 0xdd, 0x84, 0xfd, 0x63, 0xce,
0x73, 0x2d, 0xa7, 0xa1, 0x9b, 0xd4, 0xde, 0xc7, 0xa4, 0xd5, 0x54, 0x92, 0xd6, 0x67, 0xb2, 0x8e,
0x2c, 0x59, 0xdf, 0xd4, 0x0c, 0xd6, 0x8d, 0x65, 0xb0, 0xcb, 0x7b, 0x24, 0xaf, 0x7f, 0x3a, 0xfb,
0xbb, 0x1c, 0xba, 0x23, 0x85, 0xf3, 0x20, 0x72, 0xdb, 0x4d, 0x35, 0xb7, 0x5d, 0xda, 0x9b, 0x31,
0xa7, 0x24, 0xba, 0xbf, 0xcf, 0xa7, 0x8e, 0x95, 0xa7, 0x56, 0xaf, 0x43, 0xae, 0xc1, 0xbe, 0x08,
0x5d, 0x97, 0x63, 0x9d, 0xcf, 0xa8, 0x4f, 0xdd, 0x5b, 0xf3, 0x6f, 0x88, 0x00, 0x84, 0xc2, 0x69,
0xb2, 0x41, 0x23, 0xd9, 0x86, 0x04, 0x27, 0x81, 0x18, 0x7c, 0x1a, 0x8d, 0xf1, 0xec, 0x41, 0xd9,
0xe7, 0x0e, 0xb1, 0x33, 0xdb, 0x4a, 0xd8, 0x4c, 0xa2, 0x34, 0xec, 0xa8, 0xd7, 0xd6, 0xb7, 0x63,
0xbb, 0x5c, 0x70, 0xd4, 0x5b, 0x09, 0xbb, 0x48, 0x94, 0x0e, 0x42, 0xfe, 0x64, 0xa3, 0xeb, 0x45,
0x2e, 0x28, 0x65, 0x3a, 0x95, 0x75, 0x88, 0x49, 0x77, 0x9d, 0x55, 0x0c, 0xa2, 0x27, 0xa1, 0x27,
0xd2, 0x46, 0x62, 0xe2, 0xb4, 0x0f, 0x0a, 0xe8, 0xae, 0xbe, 0x3e, 0x8a, 0x97, 0xfa, 0x64, 0x0f,
0xc7, 0x32, 0x64, 0x0e, 0x4d, 0x34, 0x61, 0xea, 0x8e, 0xcb, 0xcd, 0xbd, 0x6a, 0xb4, 0xfd, 0xdd,
0xed, 0x6f, 0x6f, 0xd1, 0x71, 0x19, 0x8b, 0xd8, 0xc2, 0x96, 0xa3, 0x28, 0x44, 0x05, 0x65, 0x19,
0x87, 0xbc, 0x30, 0x48, 0xcb, 0x38, 0xe6, 0xd5, 0x6e, 0x12, 0xa7, 0x67, 0x10, 0xf2, 0x3c, 0x1f,
0xcb, 0x37, 0x02, 0x88, 0x05, 0xb5, 0x9b, 0xc4, 0xe9, 0x21, 0xeb, 0x3b, 0x29, 0x51, 0x55, 0xf3,
0x47, 0x2e, 0x9d, 0x45, 0xde, 0x71, 0x2f, 0xc0, 0x9d, 0x9c, 0xef, 0x4f, 0x4a, 0x6e, 0x86, 0xa5,
0xad, 0xa0, 0x89, 0x8b, 0x96, 0xe3, 0xd6, 0xd8, 0x96, 0xcc, 0xf6, 0x2d, 0x7c, 0x17, 0x2a, 0x80,
0x73, 0xca, 0x93, 0xc8, 0x98, 0x54, 0xbb, 0xc0, 0x9c, 0x97, 0xb5, 0xf3, 0x6e, 0x7d, 0x5b, 0xfa,
0x75, 0xd8, 0x0d, 0x7e, 0xc9, 0xda, 0xb5, 0x0b, 0x68, 0x54, 0xee, 0x8b, 0x51, 0xa0, 0x42, 0x7f,
0xa0, 0x42, 0x02, 0xd0, 0x57, 0xf2, 0x80, 0x24, 0xb6, 0x91, 0x7d, 0xdc, 0x10, 0x5e, 0x56, 0x36,
0x84, 0x73, 0x83, 0x6d, 0xb5, 0xa9, 0x1b, 0x40, 0x33, 0xb6, 0x01, 0x3c, 0x31, 0x20, 0x7e, 0xff,
0x80, 0xff, 0x5e, 0x0e, 0x4d, 0xaa, 0x9b, 0x3e, 0x8b, 0x28, 0x6c, 0x0d, 0x19, 0x0d, 0x7a, 0x39,
0x3c, 0xf0, 0x07, 0x11, 0xa5, 0x1e, 0x76, 0x91, 0x28, 0x1d, 0xa6, 0x01, 0x1b, 0x73, 0x07, 0x69,
0x94, 0x4a, 0x8a, 0xd2, 0xec, 0xc5, 0xa5, 0x22, 0x5e, 0x5c, 0x40, 0x51, 0xf7, 0x0a, 0xac, 0x79,
0x1b, 0xd2, 0xc1, 0x1e, 0x31, 0xdc, 0xb3, 0xa2, 0xb8, 0xda, 0xf7, 0x73, 0x68, 0x4c, 0x2a, 0x7c,
0x10, 0x3b, 0xd2, 0x4b, 0xea, 0x8e, 0xf4, 0xe8, 0x80, 0xf9, 0x54, 0xf2, 0x0e, 0xf4, 0x7e, 0x38,
0x16, 0x96, 0x41, 0xb1, 0x04, 0x6f, 0x03, 0x96, 0x53, 0x3c, 0xc1, 0x63, 0x4b, 0x8c, 0xf0, 0x1e,
0xfc, 0x9f, 0x39, 0x34, 0x65, 0xc4, 0x72, 0x2e, 0x69, 0xea, 0xa7, 0x06, 0x53, 0x2d, 0x80, 0x09,
0xdf, 0xa1, 0xe2, 0x3d, 0xa4, 0x47, 0xa4, 0xe6, 0xa1, 0x1e, 0x2a, 0xac, 0x83, 0xf6, 0xae, 0xdb,
0x1d, 0x70, 0xaf, 0x4c, 0xca, 0x26, 0xab, 0x45, 0x3e, 0x7c, 0xe8, 0x21, 0x1c, 0x5a, 0x7b, 0x2f,
0x1f, 0x18, 0xac, 0x2e, 0xd6, 0x48, 0x90, 0xef, 0xe6, 0xf6, 0x22, 0xdf, 0x1d, 0x4b, 0xca, 0x75,
0x21, 0x82, 0x14, 0x5c, 0x73, 0xd0, 0xfb, 0x36, 0x29, 0x61, 0x75, 0xb9, 0x1e, 0xc6, 0x29, 0xf8,
0x20, 0x0c, 0x12, 0xbf, 0x86, 0x86, 0xd9, 0x69, 0x82, 0x2d, 0xf1, 0xc2, 0xe0, 0x21, 0x84, 0xd9,
0x2b, 0xf4, 0x30, 0xf6, 0x05, 0x1e, 0xc6, 0x71, 0x21, 0x4d, 0x9f, 0x50, 0xe2, 0x00, 0xbe, 0x8e,
0xc6, 0x4d, 0x4b, 0x6f, 0x56, 0x75, 0x53, 0x87, 0x64, 0xc4, 0xbf, 0xf2, 0xff, 0xbb, 0xfe, 0x11,
0x71, 0x39, 0xc2, 0x21, 0xe3, 0x49, 0xf0, 0x16, 0x18, 0xed, 0x23, 0x0a, 0xb6, 0xa6, 0x23, 0x14,
0x8e, 0x1e, 0x9f, 0x44, 0xc3, 0xcc, 0x85, 0xc5, 0xc9, 0xa0, 0x54, 0x2d, 0x31, 0x5d, 0x99, 0x67,
0x83, 0xae, 0xbc, 0x1d, 0x9f, 0x41, 0xc8, 0xa1, 0x0d, 0x9b, 0xba, 0x3c, 0xec, 0xf0, 0xcb, 0xaf,
0x30, 0x00, 0xd7, 0x83, 0x1e, 0x12, 0xa1, 0xd2, 0xfe, 0x2f, 0x8f, 0x0a, 0xcf, 0x58, 0x6b, 0xfb,
0x18, 0xe4, 0x5f, 0x50, 0x82, 0x7c, 0xd6, 0xf5, 0x0f, 0xba, 0xa5, 0x06, 0xf8, 0x7f, 0x8a, 0x05,
0xf8, 0xb3, 0x03, 0x60, 0xf7, 0x0f, 0xee, 0x3f, 0x2e, 0xa0, 0x71, 0xa0, 0x9a, 0xb7, 0x3a, 0x4d,
0x83, 0xa7, 0x42, 0x0f, 0x2b, 0x97, 0x04, 0x77, 0xc7, 0x2e, 0x09, 0xa6, 0xa2, 0xb4, 0x91, 0xeb,
0x81, 0x6b, 0x81, 0xa2, 0x62, 0x52, 0xce, 0xab, 0xe2, 0x80, 0xb3, 0xef, 0xa3, 0x7d, 0x25, 0xc0,
0x54, 0xd5, 0x83, 0xd3, 0x2a, 0xcf, 0xa1, 0x6a, 0xb6, 0xb5, 0x26, 0x12, 0xb3, 0x42, 0xf6, 0xc4,
0xec, 0xa8, 0xd4, 0x85, 0x27, 0x67, 0x01, 0x12, 0x51, 0x81, 0xf1, 0x1b, 0x08, 0xb3, 0x86, 0x55,
0x38, 0x5c, 0x3b, 0x62, 0x74, 0x4c, 0xdc, 0x50, 0x76, 0x71, 0xc1, 0xad, 0xd5, 0x72, 0x0f, 0x1c,
0x49, 0x10, 0xc1, 0xee, 0x31, 0x6c, 0xaa, 0x3b, 0x90, 0xb7, 0x0e, 0x73, 0xd3, 0x85, 0xc7, 0x7d,
0xde, 0x4a, 0x64, 0x2f, 0x7e, 0x10, 0x8d, 0xb6, 0x61, 0x9d, 0xb0, 0xfc, 0x6c, 0x84, 0x13, 0x06,
0x27, 0xef, 0x15, 0xd1, 0x4c, 0xfc, 0x7e, 0xed, 0xdb, 0x39, 0x34, 0x0a, 0x13, 0x75, 0x10, 0x9b,
0xdf, 0xf3, 0xea, 0xe6, 0x77, 0x26, 0xbb, 0x83, 0xa6, 0x6c, 0x7c, 0xdf, 0x2c, 0xf0, 0x31, 0xf0,
0x18, 0x0e, 0x67, 0x9e, 0xae, 0x6e, 0xeb, 0xa6, 0x49, 0x4d, 0xc3, 0x69, 0xcb, 0xd4, 0x91, 0x9f,
0x79, 0x6a, 0x61, 0x33, 0x89, 0xd2, 0x30, 0x96, 0x86, 0xd5, 0xee, 0x9a, 0xd4, 0x7f, 0x61, 0x08,
0x58, 0xe6, 0xc3, 0x66, 0x12, 0xa5, 0xc1, 0x57, 0xd0, 0x51, 0xbd, 0xe1, 0x1a, 0x5b, 0x74, 0x81,
0xea, 0x4d, 0xd3, 0xe8, 0x50, 0xff, 0x36, 0xb7, 0xc0, 0x53, 0xc8, 0xdb, 0x81, 0xf9, 0xe8, 0x5c,
0x12, 0x01, 0x49, 0xe6, 0x53, 0xae, 0xd3, 0x87, 0xf6, 0xf1, 0x3a, 0xfd, 0x61, 0x34, 0xae, 0xc3,
0xc9, 0xc8, 0xef, 0xe1, 0x7e, 0x54, 0xac, 0x4e, 0xb1, 0xd0, 0x3b, 0x17, 0x69, 0x27, 0x0a, 0x95,
0x72, 0x09, 0x3f, 0xb2, 0xd7, 0xaf, 0xad, 0xdf, 0x2a, 0xa0, 0x52, 0x10, 0x7c, 0xb0, 0x85, 0x50,
0xc3, 0x5f, 0xe0, 0xfe, 0xb5, 0xcf, 0xe3, 0xd9, 0x3d, 0x25, 0x08, 0x12, 0x61, 0x3c, 0x0e, 0x9a,
0x1c, 0x12, 0x11, 0x01, 0x11, 0xb9, 0x04, 0x01, 0xc4, 0x76, 0x07, 0x3d, 0xcb, 0x4d, 0x00, 0x76,
0xa9, 0xee, 0x23, 0x90, 0x10, 0x0c, 0xb7, 0xe0, 0x50, 0x1c, 0xf8, 0xcc, 0xa0, 0x11, 0x49, 0x1c,
0x7e, 0x15, 0x18, 0x12, 0x83, 0x65, 0x61, 0x41, 0x78, 0x95, 0x3c, 0xe0, 0x05, 0x61, 0x41, 0xb8,
0x20, 0x91, 0xbd, 0x78, 0x16, 0x86, 0xea, 0x35, 0x1a, 0x94, 0x36, 0x69, 0x53, 0x1e, 0xdc, 0x0e,
0x4b, 0xd2, 0x52, 0xdd, 0xef, 0x20, 0x21, 0x0d, 0x03, 0x5e, 0xd7, 0x0d, 0xf6, 0x80, 0x3c, 0xa2,
0x02, 0x2f, 0xf1, 0x56, 0x22, 0x7b, 0xb5, 0xdf, 0xe6, 0xd1, 0x84, 0xe2, 0x7f, 0xf8, 0x3f, 0x72,
0xec, 0x22, 0xc1, 0x6d, 0x6c, 0xf0, 0x66, 0x7f, 0x22, 0x57, 0x76, 0xe3, 0xd3, 0x95, 0x95, 0x10,
0x4f, 0x3c, 0xd5, 0x45, 0xee, 0x25, 0x82, 0x1e, 0x12, 0x15, 0x8b, 0xff, 0x1b, 0x12, 0x5c, 0xfe,
0xbd, 0xb8, 0xdd, 0x65, 0x99, 0x43, 0xe4, 0x09, 0xf1, 0xc2, 0x6e, 0x74, 0x21, 0xf4, 0x75, 0x0f,
0x4e, 0xca, 0xfc, 0x3e, 0x3a, 0x48, 0x74, 0x57, 0x62, 0x82, 0x48, 0x8f, 0xe8, 0x99, 0xf3, 0x68,
0x2a, 0x3e, 0x8a, 0x4c, 0x4f, 0x76, 0x5f, 0xca, 0xa1, 0x72, 0x9a, 0x22, 0xec, 0x14, 0x1b, 0x00,
0x85, 0xd9, 0xe1, 0xb3, 0x74, 0x47, 0xa0, 0x2e, 0xa2, 0xa2, 0xd5, 0x65, 0xb7, 0x18, 0xf2, 0xc5,
0xae, 0x54, 0x7d, 0xd0, 0x5f, 0x95, 0x57, 0x64, 0x3b, 0xec, 0xbd, 0x47, 0x15, 0x78, 0xbf, 0x83,
0x04, 0xac, 0x58, 0x43, 0x23, 0x5c, 0x1f, 0x91, 0x65, 0x96, 0xaa, 0x88, 0xf9, 0x03, 0xcf, 0xaf,
0x61, 0x2b, 0x16, 0x3d, 0xbc, 0xce, 0xe5, 0x32, 0x75, 0xdf, 0xb0, 0xec, 0xcd, 0x9a, 0x65, 0x1a,
0x8d, 0x9d, 0x7d, 0xcc, 0xa8, 0xd6, 0x94, 0x8c, 0xea, 0xe9, 0x8c, 0xb3, 0xaa, 0x68, 0x99, 0x96,
0x5b, 0x69, 0xbf, 0x01, 0xb3, 0x2b, 0x94, 0xd1, 0x63, 0x16, 0x45, 0xc3, 0xec, 0xf1, 0xc0, 0xf7,
0xf1, 0x5d, 0x69, 0xc0, 0xce, 0xa4, 0x91, 0x5b, 0x6a, 0x06, 0x4b, 0x04, 0x3a, 0x1b, 0xe7, 0xba,
0x6d, 0xb5, 0xa5, 0xf7, 0xee, 0x4e, 0x0a, 0xa5, 0x76, 0x38, 0xce, 0x25, 0x40, 0x25, 0x1c, 0x5b,
0xfb, 0x69, 0x0e, 0x1d, 0x56, 0x28, 0x0f, 0x22, 0x2d, 0xd0, 0xd5, 0xb4, 0xe0, 0x89, 0xdd, 0x8c,
0x2c, 0x25, 0x41, 0xf8, 0xaf, 0x7c, 0x6c, 0x5c, 0xcc, 0x02, 0xb0, 0xd5, 0x8c, 0x75, 0xad, 0x66,
0x7d, 0x2f, 0xcb, 0x95, 0x44, 0xa2, 0x11, 0x82, 0x92, 0xa8, 0x04, 0xfc, 0x6f, 0x60, 0x5e, 0x56,
0xdb, 0xe0, 0x74, 0xf5, 0x06, 0xad, 0xef, 0xe5, 0xeb, 0xf9, 0x51, 0xf6, 0x7c, 0x78, 0x39, 0x0e,
0x4d, 0x7a, 0xa5, 0x69, 0xdf, 0x88, 0x4f, 0x31, 0x73, 0x32, 0xfc, 0x1c, 0x2a, 0xf2, 0xfa, 0xd4,
0x86, 0x65, 0xca, 0xf8, 0xf1, 0x08, 0x9b, 0xad, 0x9a, 0x6c, 0x83, 0xb8, 0xf0, 0x37, 0x7d, 0x73,
0x72, 0x9f, 0x90, 0x04, 0x30, 0x78, 0x19, 0x0d, 0x75, 0x07, 0xbf, 0xb9, 0xe1, 0x47, 0x75, 0x7e,
0x5d, 0xc3, 0x51, 0xb4, 0x3f, 0xc4, 0xd5, 0xe6, 0xc9, 0x9e, 0xb3, 0xf7, 0x33, 0x18, 0xec, 0x29,
0xa9, 0xb3, 0x68, 0xa3, 0x51, 0x79, 0x81, 0x31, 0xe0, 0x4e, 0x92, 0x16, 0x49, 0xc2, 0x2c, 0xdd,
0x6f, 0xf4, 0x05, 0xf1, 0x85, 0xc9, 0x15, 0x6a, 0x78, 0xb6, 0xe1, 0xee, 0xec, 0x7b, 0x50, 0x5d,
0x57, 0x82, 0xea, 0x42, 0xc6, 0x01, 0xf6, 0x68, 0x9a, 0x1a, 0x58, 0x7f, 0x99, 0x43, 0x47, 0x7b,
0xa8, 0x0f, 0x22, 0xe8, 0x50, 0x35, 0xe8, 0x3c, 0xbd, 0xdb, 0x11, 0xa6, 0x04, 0x9e, 0x1b, 0x28,
0x61, 0x7c, 0xdc, 0x75, 0xcf, 0x20, 0xd4, 0xb5, 0x8d, 0x2d, 0x48, 0x9f, 0x5a, 0xb2, 0x86, 0xb0,
0x18, 0xce, 0x49, 0x2d, 0xe8, 0x21, 0x11, 0x2a, 0xfc, 0x2f, 0xac, 0xbe, 0x6f, 0x5d, 0xf7, 0x4c,
0x77, 0xae, 0xd9, 0x9c, 0xd7, 0xbb, 0xfa, 0x9a, 0x61, 0x42, 0x16, 0x2b, 0x5f, 0x8d, 0x4b, 0xd5,
0x45, 0x51, 0xdb, 0x97, 0x44, 0x01, 0x2b, 0xf8, 0xfe, 0xfe, 0xa7, 0x6a, 0x9f, 0x78, 0x87, 0xa4,
0x08, 0xc1, 0xff, 0x0e, 0xbb, 0xa0, 0x2d, 0xf2, 0x8d, 0xe6, 0x82, 0x6d, 0x75, 0x15, 0x0d, 0x44,
0x32, 0x70, 0x01, 0x34, 0x28, 0x93, 0x14, 0x9a, 0x2c, 0x3a, 0xa4, 0x0a, 0xc2, 0x2e, 0x3a, 0x02,
0x27, 0x37, 0xeb, 0x0d, 0xaa, 0x5a, 0x60, 0x88, 0xcb, 0xaf, 0xb2, 0xda, 0x87, 0xb9, 0xde, 0xee,
0x2c, 0xa2, 0x93, 0xe0, 0x21, 0x75, 0x1e, 0xdd, 0xb2, 0x4c, 0x0f, 0xa2, 0x29, 0x24, 0xce, 0x4c,
0x12, 0x8b, 0xb8, 0xa3, 0xd7, 0x44, 0xd3, 0xe7, 0x2c, 0x23, 0xae, 0xf3, 0x2b, 0x0e, 0x9f, 0x8a,
0x5d, 0x7b, 0xb3, 0x7b, 0x28, 0xb9, 0xd6, 0x79, 0xfe, 0x5c, 0x0c, 0x83, 0xcb, 0xc5, 0xb0, 0x8b,
0x44, 0xe9, 0x70, 0x1b, 0x95, 0x36, 0xe4, 0x13, 0x88, 0x53, 0x1e, 0x1d, 0x68, 0x43, 0x54, 0x9e,
0x50, 0xc2, 0x04, 0xdf, 0x6f, 0x76, 0x48, 0x28, 0x81, 0x5d, 0x14, 0xf0, 0x8f, 0x4b, 0x0b, 0xbc,
0xa6, 0xa7, 0x18, 0x86, 0xa0, 0x8b, 0xa2, 0x99, 0xf8, 0xfd, 0x3e, 0xe9, 0xa5, 0xda, 0x7c, 0xb9,
0xd4, 0x4b, 0x0a, 0xcd, 0xc4, 0xef, 0xc7, 0x5d, 0x34, 0xea, 0xd0, 0x65, 0xa3, 0xe3, 0x6d, 0x97,
0x11, 0x5f, 0xba, 0x8b, 0x59, 0x5f, 0x3a, 0x17, 0x39, 0x77, 0xac, 0xbc, 0x22, 0x94, 0x28, 0xfb,
0x89, 0x2f, 0x06, 0x6f, 0xa3, 0x92, 0xed, 0x75, 0xe6, 0x9c, 0xab, 0x0e, 0xb5, 0xcb, 0x63, 0x5c,
0x66, 0xd6, 0xa8, 0x4c, 0x7c, 0xfe, 0xb8, 0xd4, 0xc0, 0x82, 0x01, 0x05, 0x09, 0x85, 0xe1, 0xff,
0xcf, 0x21, 0xec, 0x78, 0x5d, 0x38, 0x8e, 0xb1, 0x1c, 0x5c, 0x37, 0x79, 0x85, 0x87, 0x53, 0x1e,
0xe7, 0x3a, 0xd4, 0x32, 0xbf, 0xf0, 0xc6, 0x81, 0xe2, 0xca, 0x04, 0x37, 0x46, 0xbd, 0xa4, 0x24,
0x41, 0x0f, 0x36, 0x15, 0xeb, 0x0e, 0xff, 0x7f, 0x79, 0x62, 0xa0, 0xa9, 0x48, 0xae, 0x74, 0x09,
0xa7, 0x42, 0xf6, 0x13, 0x5f, 0x0c, 0x2b, 0x42, 0xb6, 0xa9, 0xde, 0xbc, 0xd2, 0x31, 0x77, 0x88,
0x65, 0xb9, 0x4b, 0x10, 0xbb, 0x9c, 0x1d, 0x07, 0xa2, 0x61, 0x79, 0x92, 0xbb, 0x4d, 0x50, 0x84,
0x4c, 0x12, 0xa9, 0x48, 0x0a, 0x37, 0xaf, 0x26, 0x96, 0x0f, 0x93, 0xfb, 0xfb, 0x63, 0x83, 0xdd,
0x55, 0x13, 0x87, 0x2a, 0xee, 0x5b, 0x35, 0x71, 0x44, 0x44, 0xff, 0x0b, 0xdb, 0x4f, 0x0a, 0xe8,
0x48, 0x48, 0x1c, 0xde, 0xdb, 0x3e, 0xae, 0xdc, 0xdb, 0xde, 0x1f, 0xbb, 0xb7, 0x3d, 0x9e, 0xc0,
0xf2, 0xd7, 0xeb, 0xdb, 0x3f, 0xd1, 0xeb, 0x5b, 0x56, 0x30, 0x1e, 0xce, 0xd7, 0x9f, 0x41, 0xc1,
0x78, 0xa8, 0x6c, 0x4a, 0xca, 0xf4, 0x41, 0x3e, 0x3a, 0xa2, 0xbf, 0xc4, 0xaa, 0xe4, 0x84, 0x22,
0xe1, 0xa1, 0x6c, 0x45, 0xc2, 0xda, 0x2f, 0x0a, 0x68, 0x2a, 0x1e, 0x26, 0x94, 0x9a, 0xd7, 0xdc,
0x4d, 0x6b, 0x5e, 0x6b, 0x68, 0x7a, 0xdd, 0x33, 0xcd, 0x1d, 0x6e, 0x90, 0x48, 0x0d, 0x89, 0xb8,
0x1e, 0xbf, 0x53, 0x72, 0x4e, 0x2f, 0x25, 0xd0, 0x90, 0x44, 0xce, 0x94, 0xfa, 0xdd, 0xc2, 0x40,
0xf5, 0xbb, 0x8f, 0xa3, 0x09, 0x9b, 0xff, 0xe2, 0x44, 0x2d, 0x6d, 0x09, 0x02, 0x00, 0x89, 0x76,
0x12, 0x95, 0x36, 0xb9, 0x16, 0x77, 0x78, 0x80, 0x5a, 0xdc, 0x2d, 0xe5, 0xb2, 0x7a, 0x84, 0x2f,
0x88, 0xea, 0xc0, 0x0b, 0xe2, 0x96, 0xef, 0xac, 0xb5, 0x3b, 0xd1, 0x8c, 0x64, 0x63, 0xdf, 0x40,
0xe4, 0xb2, 0x02, 0x58, 0x6a, 0x2f, 0x78, 0xed, 0xf6, 0x8e, 0x76, 0x1e, 0x16, 0x90, 0x52, 0x6a,
0x2d, 0x66, 0x5e, 0x54, 0x7f, 0xcb, 0x1a, 0x98, 0xc8, 0xcc, 0x8b, 0x76, 0x12, 0x50, 0x68, 0x1f,
0xe7, 0xd0, 0xf1, 0x94, 0xe2, 0x5b, 0x7c, 0x1d, 0x4d, 0xb6, 0xf5, 0xed, 0x48, 0x75, 0xb1, 0x0c,
0x31, 0x59, 0xcf, 0xf7, 0xfc, 0x5a, 0x7b, 0x45, 0x41, 0x22, 0x31, 0x64, 0xbe, 0xc5, 0xeb, 0xdb,
0x75, 0xcf, 0x6e, 0xd1, 0x01, 0x6f, 0x11, 0xf8, 0xf2, 0x5d, 0x91, 0x18, 0x24, 0x40, 0x63, 0x25,
0xbc, 0xe5, 0xb4, 0x7c, 0x0f, 0x32, 0xf7, 0x68, 0x11, 0xef, 0x3d, 0xb1, 0x22, 0xde, 0xc3, 0x3d,
0x7c, 0x07, 0x54, 0xc2, 0xfb, 0x7e, 0x0e, 0x1d, 0x4b, 0xce, 0x8b, 0xf1, 0xdf, 0x2b, 0x1a, 0x9f,
0x8c, 0x69, 0x7c, 0x28, 0xc6, 0x25, 0xf5, 0xdd, 0x40, 0x93, 0x32, 0x7b, 0x96, 0x30, 0xb7, 0xf0,
0x03, 0xd5, 0xad, 0x20, 0x35, 0xf7, 0xf3, 0x40, 0x3e, 0x8f, 0x6a, 0x1b, 0x89, 0xe1, 0x6a, 0x5f,
0xcc, 0xa3, 0x61, 0x5e, 0xd9, 0xb6, 0x8f, 0x49, 0xdb, 0x8b, 0x4a, 0xd2, 0x96, 0xf5, 0xed, 0x9b,
0x6b, 0x97, 0x9a, 0xaf, 0xad, 0xc5, 0xf2, 0xb5, 0x73, 0x03, 0xa1, 0xf7, 0x4f, 0xd5, 0x1e, 0x43,
0xa5, 0x40, 0x89, 0x6c, 0x81, 0x9a, 0x25, 0xc6, 0x63, 0x11, 0x11, 0x19, 0xc3, 0xfc, 0x96, 0xb2,
0x63, 0x0e, 0xf2, 0x4b, 0xea, 0x88, 0xec, 0x8a, 0xbf, 0x55, 0x8a, 0xf7, 0x99, 0xb0, 0x36, 0xb5,
0x77, 0x07, 0x85, 0x20, 0x25, 0x7e, 0x8e, 0x1e, 0xdc, 0xde, 0x15, 0xb8, 0xf7, 0x1e, 0x93, 0x3c,
0x93, 0xab, 0x4a, 0x2f, 0x89, 0x51, 0xcf, 0xc0, 0x06, 0xa0, 0x08, 0xcb, 0xf4, 0x8c, 0xf2, 0xdd,
0x1c, 0x9a, 0x4e, 0xaa, 0xa6, 0x65, 0x25, 0x53, 0x9b, 0x86, 0x2c, 0xff, 0x89, 0x94, 0x4c, 0x3d,
0x0b, 0x6d, 0x84, 0xf7, 0x04, 0x3f, 0x5c, 0xcb, 0xa7, 0xfe, 0x70, 0xed, 0x0c, 0x42, 0x60, 0x2a,
0xf9, 0x13, 0x7f, 0x39, 0xaa, 0xc0, 0x79, 0xc3, 0x1f, 0xff, 0x93, 0x08, 0x15, 0x2f, 0x92, 0x0b,
0xf5, 0xe1, 0x9b, 0x59, 0xb4, 0x48, 0x2e, 0xa2, 0x6a, 0x94, 0x4e, 0xfb, 0x5e, 0x0e, 0xdd, 0x73,
0xd3, 0x33, 0x23, 0xae, 0x2a, 0xe1, 0xa1, 0x12, 0x0b, 0x0f, 0x27, 0xd2, 0x01, 0x0e, 0xf0, 0x07,
0x0a, 0x6f, 0xe7, 0x11, 0x5e, 0xdd, 0x30, 0xec, 0x66, 0x4d, 0xb7, 0x5d, 0xd8, 0xa9, 0xc5, 0x00,
0xf7, 0x31, 0x60, 0x80, 0xc5, 0x9b, 0xd4, 0x69, 0xd8, 0x06, 0x37, 0x92, 0x9c, 0xce, 0xc0, 0xe2,
0x0b, 0x61, 0x17, 0x89, 0xd2, 0xc1, 0xd9, 0xad, 0x28, 0xf3, 0x65, 0xbf, 0xca, 0x2a, 0x6b, 0x02,
0x1c, 0x7a, 0x40, 0xb8, 0x3e, 0x64, 0x03, 0xac, 0x4b, 0x1f, 0x5c, 0x7b, 0x07, 0xc2, 0x7d, 0xaf,
0x41, 0x16, 0x44, 0x0d, 0xd1, 0x7e, 0x19, 0xe5, 0x4e, 0x34, 0xc4, 0x51, 0x99, 0x35, 0xc6, 0xc5,
0x1d, 0x3c, 0x93, 0x48, 0x78, 0xab, 0xf6, 0x59, 0x0e, 0xcd, 0x24, 0xab, 0x74, 0x10, 0xe7, 0x8e,
0xeb, 0xea, 0xb9, 0x23, 0xeb, 0x3d, 0x46, 0xb2, 0xe2, 0x29, 0x67, 0x90, 0x8f, 0x13, 0x8d, 0x7f,
0x10, 0xa3, 0x5c, 0x57, 0x47, 0x39, 0xb7, 0xeb, 0x51, 0x26, 0x8f, 0xb0, 0xfa, 0xe0, 0x8d, 0x5f,
0x9d, 0xb8, 0xed, 0x43, 0xf8, 0xfb, 0x39, 0xfc, 0xbd, 0xf5, 0xe9, 0x89, 0xdc, 0x0d, 0xf8, 0xfb,
0x10, 0xfe, 0x3e, 0x81, 0xbf, 0x77, 0x7e, 0x7d, 0xe2, 0xb6, 0x17, 0x47, 0x25, 0xe6, 0x1f, 0x03,
0x00, 0x00, 0xff, 0xff, 0x3c, 0x85, 0xb5, 0xb9, 0x4b, 0x46, 0x00, 0x00,
}

View File

@ -127,6 +127,10 @@ message DaemonSetStatus {
// pod (including nodes correctly running the daemon pod).
// More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
optional int32 desiredNumberScheduled = 3;
// NumberReady is the number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
optional int32 numberReady = 4;
}
// Deployment enables declarative updates for Pods and ReplicaSets.

File diff suppressed because it is too large Load Diff

View File

@ -459,6 +459,10 @@ type DaemonSetStatus struct {
// pod (including nodes correctly running the daemon pod).
// More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md
DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
// NumberReady is the number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
}
// +genclient=true
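For illustration only, a minimal, self-contained sketch of how the new numberReady field might be consumed. The struct below merely mirrors the four status fields shown in this hunk; it is not the real extensions/v1beta1 type.

package main

import "fmt"

// daemonSetStatus mirrors the fields shown in the hunk above, for illustration only.
type daemonSetStatus struct {
	CurrentNumberScheduled int32
	NumberMisscheduled     int32
	DesiredNumberScheduled int32
	NumberReady            int32
}

func main() {
	st := daemonSetStatus{DesiredNumberScheduled: 5, CurrentNumberScheduled: 5, NumberReady: 4}
	// A DaemonSet is fully ready once every desired node reports a ready daemon pod.
	fullyReady := st.DesiredNumberScheduled > 0 && st.NumberReady == st.DesiredNumberScheduled
	fmt.Printf("ready %d/%d, fully ready: %v\n", st.NumberReady, st.DesiredNumberScheduled, fullyReady)
}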

View File

@ -99,6 +99,7 @@ var map_DaemonSetStatus = map[string]string{
"currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
"numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
"desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md",
"numberReady": "NumberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
}
func (DaemonSetStatus) SwaggerDoc() map[string]string {

View File

@ -455,6 +455,7 @@ func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *Daemo
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
return nil
}
@ -466,6 +467,7 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *exten
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
return nil
}

View File

@ -249,6 +249,7 @@ func DeepCopy_v1beta1_DaemonSetStatus(in interface{}, out interface{}, c *conver
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
return nil
}
}

View File

@ -226,6 +226,7 @@ func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *con
out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled
out.NumberReady = in.NumberReady
return nil
}
}

View File

@ -25,6 +25,7 @@ import (
"path"
gruntime "runtime"
"strings"
"time"
"github.com/golang/glog"
@ -109,6 +110,9 @@ type Config struct {
// Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst
RateLimiter flowcontrol.RateLimiter
// The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
Timeout time.Duration
// Version forces a specific version to be used (if registered)
// Do we need this?
// Version string
@ -185,6 +189,9 @@ func RESTClientFor(config *Config) (*RESTClient, error) {
var httpClient *http.Client
if transport != http.DefaultTransport {
httpClient = &http.Client{Transport: transport}
if config.Timeout > 0 {
httpClient.Timeout = config.Timeout
}
}
return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
@ -210,6 +217,9 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
var httpClient *http.Client
if transport != http.DefaultTransport {
httpClient = &http.Client{Transport: transport}
if config.Timeout > 0 {
httpClient.Timeout = config.Timeout
}
}
versionConfig := config.ContentConfig
@ -353,5 +363,6 @@ func AnonymousClientConfig(config *Config) *Config {
WrapTransport: config.WrapTransport,
QPS: config.QPS,
Burst: config.Burst,
Timeout: config.Timeout,
}
}
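A standard-library-only sketch of the pattern these hunks add: carrying a configured timeout onto the per-request http.Client, where zero means no timeout. The clientConfig type below is a stand-in for the Config shown above, not the real one.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// clientConfig stands in for the Config type in the hunk; only Timeout matters here.
type clientConfig struct {
	Timeout time.Duration
}

// httpClientFor builds a client and applies the timeout exactly as the hunk does.
func httpClientFor(cfg clientConfig, transport http.RoundTripper) *http.Client {
	client := &http.Client{Transport: transport}
	// Zero means "no timeout", matching the comment on Config.Timeout above.
	if cfg.Timeout > 0 {
		client.Timeout = cfg.Timeout
	}
	return client
}

func main() {
	c := httpClientFor(clientConfig{Timeout: 30 * time.Second}, http.DefaultTransport)
	fmt.Println("client timeout:", c.Timeout)
}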

View File

@ -1,79 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unversioned
import (
"time"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/util/wait"
)
// DefaultRetry is the recommended retry for a conflict where multiple clients
// are making changes to the same resource.
var DefaultRetry = wait.Backoff{
Steps: 5,
Duration: 10 * time.Millisecond,
Factor: 1.0,
Jitter: 0.1,
}
// DefaultBackoff is the recommended backoff for a conflict where a client
// may be attempting to make an unrelated modification to a resource under
// active management by one or more controllers.
var DefaultBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
// RetryOnConflict executes the provided function repeatedly, retrying if the server returns a conflicting
// write. Callers should preserve previous executions if they wish to retry changes. It performs an
// exponential backoff.
//
// var pod *api.Pod
// err := RetryOnConflict(DefaultBackoff, func() (err error) {
// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus)
// return
// })
// if err != nil {
// // may be conflict if max retries were hit
// return err
// }
// ...
//
// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
var lastConflictErr error
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
err := fn()
switch {
case err == nil:
return true, nil
case errors.IsConflict(err):
lastConflictErr = err
return false, nil
default:
return false, err
}
})
if err == wait.ErrWaitTimeout {
err = lastConflictErr
}
return err
}

View File

@ -16,7 +16,10 @@ limitations under the License.
package common
import "github.com/go-openapi/spec"
import (
"github.com/emicklei/go-restful"
"github.com/go-openapi/spec"
)
// OpenAPIDefinition describes a single type. Normally these definitions are auto-generated using gen-openapi.
type OpenAPIDefinition struct {
@ -35,6 +38,27 @@ type OpenAPIDefinitionGetter interface {
OpenAPIDefinition() *OpenAPIDefinition
}
// Config is a set of configuration options for OpenAPI spec generation.
type Config struct {
// List of supported protocols such as https, http, etc.
ProtocolList []string
// Info is general information about the API.
Info *spec.Info
// DefaultResponse will be used if an operation does not have any responses listed. It
// will show up as ... "responses" : {"default" : $DefaultResponse} in the spec.
DefaultResponse *spec.Response
// List of webservice's path prefixes to ignore
IgnorePrefixes []string
// OpenAPIDefinitions should provide definitions for all models used by routes. Failure to provide this map
// or any of the models will result in spec generation failure.
Definitions *OpenAPIDefinitions
// GetOperationID returns operation id for a restful route. It is an optional function to customize operation IDs.
GetOperationID func(servePath string, r *restful.Route) (string, error)
}
// This function is a reference for converting Go (or any custom type) to a simple OpenAPI type, format pair. There are
// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type
// comment (the comment that is added before type definition) will be lost. The spec will still have the property
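A hedged sketch of plausible values for the Config fields introduced above. The openAPIConfig struct below only mirrors a subset of those fields for illustration; the field values are invented, and only the field names come from the hunk.

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

// openAPIConfig mirrors a subset of the Config fields shown above; illustrative only.
type openAPIConfig struct {
	ProtocolList    []string
	Info            *spec.Info
	DefaultResponse *spec.Response
	IgnorePrefixes  []string
}

func main() {
	cfg := openAPIConfig{
		ProtocolList: []string{"https"},
		Info: &spec.Info{InfoProps: spec.InfoProps{
			Title:   "Example API",
			Version: "v1",
		}},
		// Used whenever an operation does not list any responses of its own.
		DefaultResponse: &spec.Response{ResponseProps: spec.ResponseProps{
			Description: "Default Response.",
		}},
		IgnorePrefixes: []string{"/swaggerapi"},
	}
	fmt.Println("serving over:", cfg.ProtocolList, "title:", cfg.Info.Title)
}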

View File

@ -68,11 +68,13 @@ func GetPodQOS(pod *api.Pod) QOSClass {
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !supportedQoSComputeResources.Has(string(name)) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.Copy()
if _, exists := limits[name]; !exists {
limits[name] = *delta
@ -82,7 +84,8 @@ func GetPodQOS(pod *api.Pod) QOSClass {
}
}
}
if len(limits) != len(supportedQoSComputeResources) {
if len(qosLimitsFound) != len(supportedQoSComputeResources) {
isGuaranteed = false
}
}
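A self-contained sketch of the check this hunk switches to: per container, record which supported compute resources actually carry a non-zero limit, and only treat the pod as Guaranteed if that per-container set covers every supported resource. Names and values below are illustrative, not the kubelet's real types.

package main

import "fmt"

func main() {
	supported := map[string]bool{"cpu": true, "memory": true}

	// Limits for one container; memory has no limit set at all.
	containerLimits := map[string]int64{"cpu": 500, "nvidia.com/gpu": 1}

	qosLimitsFound := map[string]struct{}{}
	for name, qty := range containerLimits {
		if !supported[name] {
			continue // unsupported resources must not count toward Guaranteed
		}
		if qty > 0 {
			qosLimitsFound[name] = struct{}{}
		}
	}

	isGuaranteed := len(qosLimitsFound) == len(supported)
	fmt.Println("guaranteed:", isGuaranteed) // false: memory limit missing
}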

View File

@ -17,6 +17,7 @@ limitations under the License.
package labels
import (
"fmt"
"sort"
"strings"
)
@ -78,3 +79,103 @@ func FormatLabels(labelMap map[string]string) string {
}
return l
}
// Conflicts takes two maps and returns true if there is a key match between
// the maps but the values don't match; it returns false in all other cases
func Conflicts(labels1, labels2 Set) bool {
small := labels1
big := labels2
if len(labels2) < len(labels1) {
small = labels2
big = labels1
}
for k, v := range small {
if val, match := big[k]; match {
if val != v {
return true
}
}
}
return false
}
// Merge combines given maps, and does not check for any conflicts
// between the maps. In case of conflicts, second map (labels2) wins
func Merge(labels1, labels2 Set) Set {
mergedMap := Set{}
for k, v := range labels1 {
mergedMap[k] = v
}
for k, v := range labels2 {
mergedMap[k] = v
}
return mergedMap
}
// Equals returns true if the given maps are equal
func Equals(labels1, labels2 Set) bool {
if len(labels1) != len(labels2) {
return false
}
for k, v := range labels1 {
value, ok := labels2[k]
if !ok {
return false
}
if value != v {
return false
}
}
return true
}
// AreLabelsInWhiteList verifies whether the provided label set
// is contained in the provided whitelist and returns true, otherwise false.
func AreLabelsInWhiteList(labels, whitelist Set) bool {
if len(whitelist) == 0 {
return true
}
for k, v := range labels {
value, ok := whitelist[k]
if !ok {
return false
}
if value != v {
return false
}
}
return true
}
// ConvertSelectorToLabelsMap converts selector string to labels map
// and validates keys and values
func ConvertSelectorToLabelsMap(selector string) (Set, error) {
labelsMap := Set{}
if len(selector) == 0 {
return labelsMap, nil
}
labels := strings.Split(selector, ",")
for _, label := range labels {
l := strings.Split(label, "=")
if len(l) != 2 {
return labelsMap, fmt.Errorf("invalid selector: %s", l)
}
key := strings.TrimSpace(l[0])
if err := validateLabelKey(key); err != nil {
return labelsMap, err
}
value := strings.TrimSpace(l[1])
if err := validateLabelValue(value); err != nil {
return labelsMap, err
}
labelsMap[key] = value
}
return labelsMap, nil
}
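An illustrative use of the helpers added above. The import path is assumed from the package name and the vendoring layout seen elsewhere in this commit; the functions themselves are exactly the ones defined in this hunk.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// Parse "key=value" pairs, validating keys and values as the helper does.
	base, err := labels.ConvertSelectorToLabelsMap("app=frontend,tier=web")
	if err != nil {
		panic(err)
	}

	overrides := labels.Set{"tier": "canary"}

	fmt.Println(labels.Conflicts(base, overrides)) // true: "tier" differs
	fmt.Println(labels.Merge(base, overrides))     // second map wins on "tier"
	fmt.Println(labels.Equals(base, base))         // true
}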

View File

@ -1,56 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not.
type StringFlag struct {
// If Set has been invoked this value is true
provided bool
// The exact value provided on the flag
value string
}
func NewStringFlag(defaultVal string) StringFlag {
return StringFlag{value: defaultVal}
}
func (f *StringFlag) Default(value string) {
f.value = value
}
func (f StringFlag) String() string {
return f.value
}
func (f StringFlag) Value() string {
return f.value
}
func (f *StringFlag) Set(value string) error {
f.value = value
f.provided = true
return nil
}
func (f StringFlag) Provided() bool {
return f.provided
}
func (f *StringFlag) Type() string {
return "string"
}

79
vendor/k8s.io/kubernetes/pkg/util/trie.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// A simple trie implementation with Add and HasPrefix methods only.
type Trie struct {
children map[byte]*Trie
wordTail bool
word string
}
// CreateTrie creates a Trie and adds all strings in the provided list to it.
func CreateTrie(list []string) Trie {
ret := Trie{
children: make(map[byte]*Trie),
wordTail: false,
}
for _, v := range list {
ret.Add(v)
}
return ret
}
// Add adds a word to this trie
func (t *Trie) Add(v string) {
root := t
for _, b := range []byte(v) {
child, exists := root.children[b]
if !exists {
child = &Trie{
children: make(map[byte]*Trie),
wordTail: false,
}
root.children[b] = child
}
root = child
}
root.wordTail = true
root.word = v
}
// HasPrefix returns true if v has any of the prefixes stored in this trie.
func (t *Trie) HasPrefix(v string) bool {
_, has := t.GetPrefix(v)
return has
}
// GetPrefix is like HasPrefix but returns the matched prefix, or the empty string otherwise.
func (t *Trie) GetPrefix(v string) (string, bool) {
root := t
if root.wordTail {
return root.word, true
}
for _, b := range []byte(v) {
child, exists := root.children[b]
if !exists {
return "", false
}
if child.wordTail {
return child.word, true
}
root = child
}
return "", false
}
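A short usage sketch of the new Trie. The import path follows the vendored file path shown above (k8s.io/kubernetes/pkg/util); the example inputs are invented.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	// Build a prefix set once, then test request paths against it.
	t := util.CreateTrie([]string{"/healthz", "/swaggerapi"})
	t.Add("/apis/extensions")

	fmt.Println(t.HasPrefix("/healthz/ping")) // true
	fmt.Println(t.GetPrefix("/apis/extensions/v1beta1"))
	// -> "/apis/extensions" true
	fmt.Println(t.HasPrefix("/metrics")) // false
}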

View File

@ -0,0 +1,438 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"cloud.google.com/go/internal"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
metaClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
},
}
subscribeClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
},
}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
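The exported helpers above are thin wrappers over metadata service paths. A minimal usage sketch, not part of the vendored file; the import path and the attribute name `my-key` are assumptions for illustration, and these calls simply return an error when not running on GCE:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/cloud/compute/metadata"
)

func main() {
	// One-shot lookups.
	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", proj)

	// Long-poll a single instance attribute: fn is re-invoked whenever the
	// value changes, and receives ok == false if the key is deleted.
	err = metadata.Subscribe("instance/attributes/my-key", func(v string, ok bool) error {
		if !ok {
			fmt.Println("my-key was deleted")
			return nil
		}
		fmt.Println("my-key is now:", v)
		return nil // a non-nil error would stop the subscription
	})
	if err != nil {
		log.Fatal(err)
	}
}
```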

View File

@ -0,0 +1,64 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides support for the cloud packages.
//
// Users should not import this package directly.
package internal
import (
"fmt"
"net/http"
)
const userAgent = "gcloud-golang/0.1"
// Transport is an http.RoundTripper that appends Google Cloud client's
// user-agent to the original request's user-agent header.
type Transport struct {
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
// Do User-Agent some other way.
// Base is the actual http.RoundTripper
// requests will use. It must not be nil.
Base http.RoundTripper
}
// RoundTrip appends a user-agent to the existing user-agent
// header and delegates the request to the base http.RoundTripper.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req = cloneRequest(req)
ua := req.Header.Get("User-Agent")
if ua == "" {
ua = userAgent
} else {
ua = fmt.Sprintf("%s %s", ua, userAgent)
}
req.Header.Set("User-Agent", ua)
return t.Base.RoundTrip(req)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
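A brief sketch of how this Transport is meant to be wired in. Since the package is internal and not meant to be imported directly, the helper below is hypothetical and would live alongside this file:

```go
// newUAClient is a hypothetical helper: every request sent through the
// returned client gets "gcloud-golang/0.1" appended to its User-Agent.
func newUAClient() *http.Client {
	return &http.Client{
		Transport: &Transport{Base: http.DefaultTransport},
	}
}
```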

View File

@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags

View File

@ -0,0 +1,7 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

View File

@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,185 @@
# Purell
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
## Install
`go get github.com/PuerkitoBio/purell`
## Changelog
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
* **v0.1.0** : Initial release.
## Examples
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
```go
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```
## API
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
```go
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
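For example, a quick sketch of building and using a custom set (the input URL is only illustrative):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Everything in FlagsUnsafeGreedy except fragment removal, plus DWORD host decoding.
	flags := purell.FlagsUnsafeGreedy&^purell.FlagRemoveFragment | purell.FlagDecodeDWORDHost
	fmt.Println(purell.MustNormalizeURLString("HTTP://www.Example.com/a/../b#section", flags))
	// Prints: http://example.com/b#section
}
```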
The [full godoc reference is available on gopkgdoc][godoc].
Some things to note:
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
- %24 -> $
- %26 -> &
- %2B-%3B -> +,-./0123456789:;
- %3D -> =
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
- %5F -> _
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
- %7E -> ~
* When the `NormalizeURL` function is used (passing an URL object), the source URL object is modified in place (that is, after the call, it reflects the applied normalizations).
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
### Safe vs Usually Safe vs Unsafe
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
Consider the following URL:
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
Normalizing with the `FlagsSafe` gives:
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
With the `FlagsUsuallySafeGreedy`:
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
And with `FlagsUnsafeGreedy`:
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
## TODOs
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
## Thanks / Contributions
@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
## License
The [BSD 3-Clause license][bsd].
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7

View File

@ -0,0 +1,375 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell
import (
"bytes"
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/secure/precis"
"golang.org/x/text/unicode/norm"
)
// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
)
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
FlagLowercaseScheme,
FlagLowercaseHost,
FlagRemoveDefaultPort,
FlagRemoveDirectoryIndex,
FlagRemoveDotSegments,
FlagRemoveFragment,
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
FlagRemoveDuplicateSlashes,
FlagRemoveWWW,
FlagAddWWW,
FlagSortQuery,
FlagDecodeDWORDHost,
FlagDecodeOctalHost,
FlagDecodeHexHost,
FlagRemoveUnnecessaryHostDots,
FlagRemoveEmptyPortSeparator,
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
FlagAddTrailingSlash,
}
// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
FlagLowercaseScheme: lowercaseScheme,
FlagLowercaseHost: lowercaseHost,
FlagRemoveDefaultPort: removeDefaultPort,
FlagRemoveDirectoryIndex: removeDirectoryIndex,
FlagRemoveDotSegments: removeDotSegments,
FlagRemoveFragment: removeFragment,
FlagForceHTTP: forceHTTP,
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
FlagRemoveWWW: removeWWW,
FlagAddWWW: addWWW,
FlagSortQuery: sortQuery,
FlagDecodeDWORDHost: decodeDWORDHost,
FlagDecodeOctalHost: decodeOctalHost,
FlagDecodeHexHost: decodeHexHost,
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
FlagRemoveTrailingSlash: removeTrailingSlash,
FlagAddTrailingSlash: addTrailingSlash,
}
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
result, e := NormalizeURLString(u, f)
if e != nil {
panic(e)
}
return result
}
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
if parsed, e := url.Parse(u); e != nil {
return "", e
} else {
options := make([]precis.Option, 1, 3)
options[0] = precis.IgnoreCase
if f&FlagLowercaseHost == FlagLowercaseHost {
options = append(options, precis.FoldCase())
}
options = append(options, precis.Norm(norm.NFC))
profile := precis.NewFreeform(options...)
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
return "", e
}
return NormalizeURL(parsed, f), nil
}
panic("Unreachable code.")
}
// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
for _, k := range flagsOrder {
if f&k == k {
flags[k](u)
}
}
return urlesc.Escape(u)
}
func lowercaseScheme(u *url.URL) {
if len(u.Scheme) > 0 {
u.Scheme = strings.ToLower(u.Scheme)
}
}
func lowercaseHost(u *url.URL) {
if len(u.Host) > 0 {
u.Host = strings.ToLower(u.Host)
}
}
func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
return ""
}
return val
})
}
}
func removeTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:l-1]
}
} else if l = len(u.Host); l > 0 {
if strings.HasSuffix(u.Host, "/") {
u.Host = u.Host[:l-1]
}
}
}
func addTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
} else if l = len(u.Host); l > 0 {
if !strings.HasSuffix(u.Host, "/") {
u.Host += "/"
}
}
}
func removeDotSegments(u *url.URL) {
if len(u.Path) > 0 {
var dotFree []string
var lastIsDot bool
sections := strings.Split(u.Path, "/")
for _, s := range sections {
if s == ".." {
if len(dotFree) > 0 {
dotFree = dotFree[:len(dotFree)-1]
}
} else if s != "." {
dotFree = append(dotFree, s)
}
lastIsDot = (s == "." || s == "..")
}
// Special case if host does not end with / and new path does not begin with /
u.Path = strings.Join(dotFree, "/")
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
// Special case if the last segment was a dot, make sure the path ends with a slash
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
}
}
func removeDirectoryIndex(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
}
}
func removeFragment(u *url.URL) {
u.Fragment = ""
}
func forceHTTP(u *url.URL) {
if strings.ToLower(u.Scheme) == "https" {
u.Scheme = "http"
}
}
func removeDuplicateSlashes(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
}
}
func removeWWW(u *url.URL) {
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = u.Host[4:]
}
}
func addWWW(u *url.URL) {
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = "www." + u.Host
}
}
func sortQuery(u *url.URL) {
q := u.Query()
if len(q) > 0 {
arKeys := make([]string, len(q))
i := 0
for k, _ := range q {
arKeys[i] = k
i++
}
sort.Strings(arKeys)
buf := new(bytes.Buffer)
for _, k := range arKeys {
sort.Strings(q[k])
for _, v := range q[k] {
if buf.Len() > 0 {
buf.WriteRune('&')
}
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
}
}
// Rebuild the raw query string
u.RawQuery = buf.String()
}
}
func decodeDWORDHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
var parts [4]int64
dword, _ := strconv.ParseInt(matches[1], 10, 0)
for i, shift := range []uint{24, 16, 8, 0} {
parts[i] = dword >> shift & 0xFF
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
}
}
}
func decodeOctalHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
var parts [4]int64
for i := 1; i <= 4; i++ {
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
}
}
}
func decodeHexHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
// Conversion is safe because of regex validation
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
// Set host as DWORD (base 10) encoded host
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
// The rest is the same as decoding a DWORD host
decodeDWORDHost(u)
}
}
}
func removeUnncessaryHostDots(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
// Trim the leading and trailing dots
u.Host = strings.Trim(matches[1], ".")
if len(matches) > 2 {
u.Host += matches[2]
}
}
}
}
func removeEmptyPortSeparator(u *url.URL) {
if len(u.Host) > 0 {
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
}
}

View File

@ -0,0 +1,11 @@
language: go
go:
- 1.4
- tip
install:
- go build .
script:
- go test -v

View File

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,16 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.
It contains some parts of the net/url package, modified so as to allow
some reserved characters that net/url incorrectly escapes (see [issue 5684](https://github.com/golang/go/issues/5684)).
## Install
go get github.com/PuerkitoBio/urlesc
## License
Go license (BSD-3-Clause)
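A short usage sketch (the escaping shown in the comments is what RFC 3986 prescribes):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	u, err := url.Parse("http://example.com/foo!bar/?q=1")
	if err != nil {
		panic(err)
	}
	// Escape reassembles the URL, leaving sub-delims such as '!' unescaped in the path.
	fmt.Println(urlesc.Escape(u)) // http://example.com/foo!bar/?q=1
	// QueryEscape escapes a single query component.
	fmt.Println(urlesc.QueryEscape("a&b=c d")) // a%26b%3Dc+d
}
```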

View File

@ -0,0 +1,180 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters that net/url incorrectly escapes.
// See https://github.com/golang/go/issues/5684
package urlesc
import (
"bytes"
"net/url"
"strings"
)
type encoding int
const (
encodePath encoding = 1 + iota
encodeUserPassword
encodeQueryComponent
encodeFragment
)
// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
// §2.3 Unreserved characters (alphanum)
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
return false
// §2.2 Reserved characters (reserved)
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
// Different sections of the URL allow a few of
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
// The RFC allows sub-delims and : @.
// '/', '[' and ']' can be used to assign meaning to individual path
// segments. This package only manipulates the path as a whole,
// so we allow those as well. That leaves only ? and # to escape.
return c == '?' || c == '#'
case encodeUserPassword: // §3.2.1
// The RFC allows : and sub-delims in
// userinfo. The parsing of userinfo treats ':' as special so we must escape
// all the gen-delims.
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
case encodeQueryComponent: // §3.4
// The RFC allows / and ?.
return c != '/' && c != '?'
case encodeFragment: // §4.1
// The RFC text is silent but the grammar allows
// everything, so escape nothing but #
return c == '#'
}
}
// Everything else must be escaped.
return true
}
// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
return escape(s, encodeQueryComponent)
}
func escape(s string, mode encoding) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c, mode) {
if c == ' ' && mode == encodeQueryComponent {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ' && mode == encodeQueryComponent:
t[j] = '+'
j++
case shouldEscape(c, mode):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
var uiReplacer = strings.NewReplacer(
"%21", "!",
"%27", "'",
"%28", "(",
"%29", ")",
"%2A", "*",
)
// unescapeUserinfo unescapes some characters that need not be escaped, as per RFC 3986.
func unescapeUserinfo(s string) string {
return uiReplacer.Replace(s)
}
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
// scheme:opaque
// scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
// - if u.Scheme is empty, scheme: is omitted.
// - if u.User is nil, userinfo@ is omitted.
// - if u.Host is empty, host/ is omitted.
// - if u.Scheme and u.Host are empty and u.User is nil,
// the entire scheme://userinfo@host/ is omitted.
// - if u.Host is non-empty and u.Path begins with a /,
// the form host/path does not add its own /.
// - if u.RawQuery is empty, ?query is omitted.
// - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
var buf bytes.Buffer
if u.Scheme != "" {
buf.WriteString(u.Scheme)
buf.WriteByte(':')
}
if u.Opaque != "" {
buf.WriteString(u.Opaque)
} else {
if u.Scheme != "" || u.Host != "" || u.User != nil {
buf.WriteString("//")
if ui := u.User; ui != nil {
buf.WriteString(unescapeUserinfo(ui.String()))
buf.WriteByte('@')
}
if h := u.Host; h != "" {
buf.WriteString(h)
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(escape(u.Path, encodePath))
}
if u.RawQuery != "" {
buf.WriteByte('?')
buf.WriteString(u.RawQuery)
}
if u.Fragment != "" {
buf.WriteByte('#')
buf.WriteString(escape(u.Fragment, encodeFragment))
}
return buf.String()
}

View File

@ -0,0 +1,22 @@
The MIT License
Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,142 @@
semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
Usage
-----
```bash
$ go get github.com/blang/semver
```
Note: Always vendor your dependencies or pin to a specific version tag.
```go
import "github.com/blang/semver"
v1, err := semver.Make("1.0.0-beta")
v2, err := semver.Make("2.0.0-beta")
v1.Compare(v2)
```
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
Why should I use this lib?
-----
- Fully spec compatible
- No reflection
- No regex
- Fully tested (Coverage >99%)
- Readable parsing/validation errors
- Fast (See [Benchmarks](#benchmarks))
- Only Stdlib
- Uses values instead of pointers
- Many features, see below
Features
-----
- Parsing and validation at all levels
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
Example
-----
Have a look at full examples in [examples/main.go](examples/main.go)
```go
import "github.com/blang/semver"
v, err := semver.Make("0.0.1-alpha.preview+123.github")
fmt.Printf("Major: %d\n", v.Major)
fmt.Printf("Minor: %d\n", v.Minor)
fmt.Printf("Patch: %d\n", v.Patch)
fmt.Printf("Pre: %s\n", v.Pre)
fmt.Printf("Build: %s\n", v.Build)
// Prerelease versions array
if len(v.Pre) > 0 {
fmt.Println("Prerelease versions:")
for i, pre := range v.Pre {
fmt.Printf("%d: %q\n", i, pre)
}
}
// Build meta data array
if len(v.Build) > 0 {
fmt.Println("Build meta data:")
for i, build := range v.Build {
fmt.Printf("%d: %q\n", i, build)
}
}
v001, err := semver.Make("0.0.1")
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
v001.GT(v) == true
v.LT(v001) == true
v.GTE(v) == true
v.LTE(v) == true
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
v001.Compare(v) == 1
v.Compare(v001) == -1
v.Compare(v) == 0
// Manipulate Version in place:
v.Pre[0], err = semver.NewPRVersion("beta")
if err != nil {
fmt.Printf("Error parsing pre release version: %q", err)
}
fmt.Println("\nValidate versions:")
v.Build[0] = "?"
err = v.Validate()
if err != nil {
fmt.Printf("Validation failed: %s\n", err)
}
```
Benchmarks
-----
```
BenchmarkParseSimple       5000000     328 ns/op    49 B/op   1 allocs/op
BenchmarkParseComplex      1000000    2105 ns/op   263 B/op   7 allocs/op
BenchmarkParseAverage      1000000    1301 ns/op   168 B/op   4 allocs/op
BenchmarkStringSimple     10000000     130 ns/op     5 B/op   1 allocs/op
BenchmarkStringLarger      5000000     280 ns/op    32 B/op   2 allocs/op
BenchmarkStringComplex     3000000     512 ns/op    80 B/op   3 allocs/op
BenchmarkStringAverage     5000000     387 ns/op    47 B/op   2 allocs/op
BenchmarkValidateSimple  500000000    7.92 ns/op     0 B/op   0 allocs/op
BenchmarkValidateComplex   2000000     923 ns/op     0 B/op   0 allocs/op
BenchmarkValidateAverage   5000000     452 ns/op     0 B/op   0 allocs/op
BenchmarkCompareSimple   100000000    11.2 ns/op     0 B/op   0 allocs/op
BenchmarkCompareComplex   50000000    40.9 ns/op     0 B/op   0 allocs/op
BenchmarkCompareAverage   50000000    43.8 ns/op     0 B/op   0 allocs/op
BenchmarkSort              5000000     436 ns/op   259 B/op   2 allocs/op
```
See benchmark cases at [semver_test.go](semver_test.go)
Motivation
-----
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
Contribution
-----
Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
License
-----
See [LICENSE](LICENSE) file.

View File

@ -0,0 +1,23 @@
package semver
import (
"encoding/json"
)
// MarshalJSON implements the encoding/json.Marshaler interface.
func (v Version) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (v *Version) UnmarshalJSON(data []byte) (err error) {
var versionString string
if err = json.Unmarshal(data, &versionString); err != nil {
return
}
*v, err = Parse(versionString)
return
}
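A small sketch of the JSON round-trip these two methods enable; the struct and values are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

type release struct {
	Name    string         `json:"name"`
	Version semver.Version `json:"version"`
}

func main() {
	var r release
	// Version unmarshals from a plain JSON string via UnmarshalJSON above...
	if err := json.Unmarshal([]byte(`{"name":"demo","version":"1.2.3-rc.1"}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Version) // 1.2.3-rc.1

	// ...and marshals back to a string via MarshalJSON.
	out, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"demo","version":"1.2.3-rc.1"}
}
```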

View File

@ -0,0 +1,395 @@
package semver
import (
"errors"
"fmt"
"strconv"
"strings"
)
const (
numbers string = "0123456789"
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
alphanum = alphas + numbers
)
// SpecVersion is the latest fully supported spec version of semver
var SpecVersion = Version{
Major: 2,
Minor: 0,
Patch: 0,
}
// Version represents a semver compatible version
type Version struct {
Major uint64
Minor uint64
Patch uint64
Pre []PRVersion
	Build []string // No precedence
}
// Version to string
func (v Version) String() string {
b := make([]byte, 0, 5)
b = strconv.AppendUint(b, v.Major, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Minor, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Patch, 10)
if len(v.Pre) > 0 {
b = append(b, '-')
b = append(b, v.Pre[0].String()...)
for _, pre := range v.Pre[1:] {
b = append(b, '.')
b = append(b, pre.String()...)
}
}
if len(v.Build) > 0 {
b = append(b, '+')
b = append(b, v.Build[0]...)
for _, build := range v.Build[1:] {
b = append(b, '.')
b = append(b, build...)
}
}
return string(b)
}
// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
return (v.Compare(o) == 0)
}
// EQ checks if v is equal to o.
func (v Version) EQ(o Version) bool {
return (v.Compare(o) == 0)
}
// NE checks if v is not equal to o.
func (v Version) NE(o Version) bool {
return (v.Compare(o) != 0)
}
// GT checks if v is greater than o.
func (v Version) GT(o Version) bool {
return (v.Compare(o) == 1)
}
// GTE checks if v is greater than or equal to o.
func (v Version) GTE(o Version) bool {
return (v.Compare(o) >= 0)
}
// GE checks if v is greater than or equal to o.
func (v Version) GE(o Version) bool {
return (v.Compare(o) >= 0)
}
// LT checks if v is less than o.
func (v Version) LT(o Version) bool {
return (v.Compare(o) == -1)
}
// LTE checks if v is less than or equal to o.
func (v Version) LTE(o Version) bool {
return (v.Compare(o) <= 0)
}
// LE checks if v is less than or equal to o.
func (v Version) LE(o Version) bool {
return (v.Compare(o) <= 0)
}
// Compare compares Versions v to o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v Version) Compare(o Version) int {
if v.Major != o.Major {
if v.Major > o.Major {
return 1
}
return -1
}
if v.Minor != o.Minor {
if v.Minor > o.Minor {
return 1
}
return -1
}
if v.Patch != o.Patch {
if v.Patch > o.Patch {
return 1
}
return -1
}
// Quick comparison if a version has no prerelease versions
if len(v.Pre) == 0 && len(o.Pre) == 0 {
return 0
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
return 1
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
return -1
}
i := 0
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
continue
} else if comp == 1 {
return 1
} else {
return -1
}
}
	// If all compared prerelease versions are equal but one has additional identifiers, that one is greater
if i == len(v.Pre) && i == len(o.Pre) {
return 0
} else if i == len(v.Pre) && i < len(o.Pre) {
return -1
} else {
return 1
}
}
// Validate validates v and returns error in case
func (v Version) Validate() error {
// Major, Minor, Patch already validated using uint64
for _, pre := range v.Pre {
if !pre.IsNum { //Numeric prerelease versions already uint64
if len(pre.VersionStr) == 0 {
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
}
if !containsOnly(pre.VersionStr, alphanum) {
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
}
}
}
for _, build := range v.Build {
if len(build) == 0 {
return fmt.Errorf("Build meta data can not be empty %q", build)
}
if !containsOnly(build, alphanum) {
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
}
}
return nil
}
// New parses the version string and returns a pointer to a validated Version, or an error
func New(s string) (vp *Version, err error) {
v, err := Parse(s)
vp = &v
return
}
// Make is an alias for Parse, parses version string and returns a validated Version or error
func Make(s string) (Version, error) {
return Parse(s)
}
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
if len(s) == 0 {
return Version{}, errors.New("Version string empty")
}
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
if len(parts) != 3 {
return Version{}, errors.New("No Major.Minor.Patch elements found")
}
// Major
if !containsOnly(parts[0], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
}
if hasLeadingZeroes(parts[0]) {
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
}
major, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return Version{}, err
}
// Minor
if !containsOnly(parts[1], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
}
if hasLeadingZeroes(parts[1]) {
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
}
minor, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return Version{}, err
}
v := Version{}
v.Major = major
v.Minor = minor
var build, prerelease []string
patchStr := parts[2]
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
build = strings.Split(patchStr[buildIndex+1:], ".")
patchStr = patchStr[:buildIndex]
}
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
prerelease = strings.Split(patchStr[preIndex+1:], ".")
patchStr = patchStr[:preIndex]
}
if !containsOnly(patchStr, numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
}
if hasLeadingZeroes(patchStr) {
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
}
patch, err := strconv.ParseUint(patchStr, 10, 64)
if err != nil {
return Version{}, err
}
v.Patch = patch
// Prerelease
for _, prstr := range prerelease {
parsedPR, err := NewPRVersion(prstr)
if err != nil {
return Version{}, err
}
v.Pre = append(v.Pre, parsedPR)
}
// Build meta data
for _, str := range build {
if len(str) == 0 {
return Version{}, errors.New("Build meta data is empty")
}
if !containsOnly(str, alphanum) {
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
}
v.Build = append(v.Build, str)
}
return v, nil
}
// MustParse is like Parse but panics if the version cannot be parsed.
func MustParse(s string) Version {
v, err := Parse(s)
if err != nil {
panic(`semver: Parse(` + s + `): ` + err.Error())
}
return v
}
// PRVersion represents a PreRelease Version
type PRVersion struct {
VersionStr string
VersionNum uint64
IsNum bool
}
// NewPRVersion creates a new valid prerelease version
func NewPRVersion(s string) (PRVersion, error) {
if len(s) == 0 {
return PRVersion{}, errors.New("Prerelease is empty")
}
v := PRVersion{}
if containsOnly(s, numbers) {
if hasLeadingZeroes(s) {
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
}
num, err := strconv.ParseUint(s, 10, 64)
// Might never be hit, but just in case
if err != nil {
return PRVersion{}, err
}
v.VersionNum = num
v.IsNum = true
} else if containsOnly(s, alphanum) {
v.VersionStr = s
v.IsNum = false
} else {
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
}
return v, nil
}
// IsNumeric checks if prerelease-version is numeric
func (v PRVersion) IsNumeric() bool {
return v.IsNum
}
// Compare compares two PreRelease Versions v and o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v PRVersion) Compare(o PRVersion) int {
if v.IsNum && !o.IsNum {
return -1
} else if !v.IsNum && o.IsNum {
return 1
} else if v.IsNum && o.IsNum {
if v.VersionNum == o.VersionNum {
return 0
} else if v.VersionNum > o.VersionNum {
return 1
} else {
return -1
}
} else { // both are Alphas
if v.VersionStr == o.VersionStr {
return 0
} else if v.VersionStr > o.VersionStr {
return 1
} else {
return -1
}
}
}
// PreRelease version to string
func (v PRVersion) String() string {
if v.IsNum {
return strconv.FormatUint(v.VersionNum, 10)
}
return v.VersionStr
}
func containsOnly(s string, set string) bool {
return strings.IndexFunc(s, func(r rune) bool {
return !strings.ContainsRune(set, r)
}) == -1
}
func hasLeadingZeroes(s string) bool {
return len(s) > 1 && s[0] == '0'
}
// NewBuildVersion creates a new valid build version
func NewBuildVersion(s string) (string, error) {
if len(s) == 0 {
return "", errors.New("Buildversion is empty")
}
if !containsOnly(s, alphanum) {
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
}
return s, nil
}

View File

@ -0,0 +1,28 @@
package semver
import (
"sort"
)
// Versions represents multiple versions.
type Versions []Version
// Len returns length of version collection
func (s Versions) Len() int {
return len(s)
}
// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
return s[i].LT(s[j])
}
// Sort sorts a slice of versions
func Sort(versions []Version) {
sort.Sort(Versions(versions))
}
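A quick sketch of sorting with the helpers above; the versions are chosen only to show prerelease ordering:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.0.0"),
		semver.MustParse("0.9.0"),
		semver.MustParse("1.0.0-beta"),
	}
	// Sort relies on Versions' sort.Interface implementation; a prerelease
	// sorts before its corresponding release.
	semver.Sort(vs)
	fmt.Println(vs) // [0.9.0 1.0.0-beta 1.0.0]
}
```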

View File

@ -0,0 +1,30 @@
package semver
import (
"database/sql/driver"
"fmt"
)
// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
var str string
switch src := src.(type) {
case string:
str = src
case []byte:
str = string(src)
default:
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
}
if t, err := Parse(str); err == nil {
*v = t
}
return
}
// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
return v.String(), nil
}
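The Scanner/Valuer pair lets a Version live in a plain text column. A minimal sketch that calls Scan directly with a raw value, the way database/sql would:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	var v semver.Version
	// database/sql hands Scan the raw column value; string and []byte are accepted.
	if err := v.Scan([]byte("2.1.0+build.5")); err != nil {
		panic(err)
	}
	val, err := v.Value() // driver.Value holding the canonical string
	if err != nil {
		panic(err)
	}
	fmt.Println(v, val) // 2.1.0+build.5 2.1.0+build.5
}
```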

View File

@ -0,0 +1,7 @@
package http
import "net/http"
type Client interface {
Do(*http.Request) (*http.Response, error)
}

View File

@ -0,0 +1,156 @@
package http
import (
"encoding/base64"
"encoding/json"
"errors"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
)
func WriteError(w http.ResponseWriter, code int, msg string) {
e := struct {
Error string `json:"error"`
}{
Error: msg,
}
b, err := json.Marshal(e)
if err != nil {
log.Printf("go-oidc: failed to marshal %#v: %v", e, err)
code = http.StatusInternalServerError
b = []byte(`{"error":"server_error"}`)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(b)
}
// BasicAuth parses a username and password from the request's
// Authorization header. This was pulled from golang master:
// https://codereview.appspot.com/76540043
func BasicAuth(r *http.Request) (username, password string, ok bool) {
auth := r.Header.Get("Authorization")
if auth == "" {
return
}
if !strings.HasPrefix(auth, "Basic ") {
return
}
c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
if err != nil {
return
}
cs := string(c)
s := strings.IndexByte(cs, ':')
if s < 0 {
return
}
return cs[:s], cs[s+1:], true
}
func cacheControlMaxAge(hdr string) (time.Duration, bool, error) {
for _, field := range strings.Split(hdr, ",") {
parts := strings.SplitN(strings.TrimSpace(field), "=", 2)
k := strings.ToLower(strings.TrimSpace(parts[0]))
if k != "max-age" {
continue
}
if len(parts) == 1 {
return 0, false, errors.New("max-age has no value")
}
v := strings.TrimSpace(parts[1])
if v == "" {
return 0, false, errors.New("max-age has empty value")
}
age, err := strconv.Atoi(v)
if err != nil {
return 0, false, err
}
if age <= 0 {
return 0, false, nil
}
return time.Duration(age) * time.Second, true, nil
}
return 0, false, nil
}
func expires(date, expires string) (time.Duration, bool, error) {
if date == "" || expires == "" {
return 0, false, nil
}
te, err := time.Parse(time.RFC1123, expires)
if err != nil {
return 0, false, err
}
td, err := time.Parse(time.RFC1123, date)
if err != nil {
return 0, false, err
}
ttl := te.Sub(td)
	// The headers indicate the data has already expired; the caller should not
	// have to care about this case.
if ttl <= 0 {
return 0, false, nil
}
return ttl, true, nil
}
func Cacheable(hdr http.Header) (time.Duration, bool, error) {
ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control"))
if err != nil || ok {
return ttl, ok, err
}
return expires(hdr.Get("Date"), hdr.Get("Expires"))
}
// MergeQuery appends additional query values to an existing URL.
func MergeQuery(u url.URL, q url.Values) url.URL {
uv := u.Query()
for k, vs := range q {
for _, v := range vs {
uv.Add(k, v)
}
}
u.RawQuery = uv.Encode()
return u
}
// NewResourceLocation appends a resource id to the end of the requested URL path.
func NewResourceLocation(reqURL *url.URL, id string) string {
var u url.URL
u = *reqURL
u.Path = path.Join(u.Path, id)
u.RawQuery = ""
u.Fragment = ""
return u.String()
}
// CopyRequest returns a clone of the provided *http.Request.
// The returned object is a shallow copy of the struct and a
// deep copy of its Header field.
func CopyRequest(r *http.Request) *http.Request {
r2 := *r
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return &r2
}
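A brief sketch exercising the caching helpers above; the import path is assumed from this vendor tree:

```go
package main

import (
	"fmt"
	"net/http"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	hdr := http.Header{}
	hdr.Set("Cache-Control", "public, max-age=300")
	// Cacheable prefers Cache-Control max-age and falls back to Date/Expires.
	ttl, ok, err := phttp.Cacheable(hdr)
	fmt.Println(ttl, ok, err) // 5m0s true <nil>
}
```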

View File

@ -0,0 +1,29 @@
package http
import (
"errors"
"net/url"
)
// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty
// since `url.Parse("")` does not return an error. The URL must contain a scheme and a host.
func ParseNonEmptyURL(u string) (*url.URL, error) {
if u == "" {
return nil, errors.New("url is empty")
}
ur, err := url.Parse(u)
if err != nil {
return nil, err
}
if ur.Scheme == "" {
return nil, errors.New("url scheme is empty")
}
if ur.Host == "" {
return nil, errors.New("url host is empty")
}
return ur, nil
}

View File

@ -0,0 +1,126 @@
package jose
import (
"encoding/json"
"fmt"
"math"
"time"
)
type Claims map[string]interface{}
func (c Claims) Add(name string, value interface{}) {
c[name] = value
}
func (c Claims) StringClaim(name string) (string, bool, error) {
cl, ok := c[name]
if !ok {
return "", false, nil
}
v, ok := cl.(string)
if !ok {
return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
}
return v, true, nil
}
func (c Claims) StringsClaim(name string) ([]string, bool, error) {
cl, ok := c[name]
if !ok {
return nil, false, nil
}
if v, ok := cl.([]string); ok {
return v, true, nil
}
// When unmarshaled, []string will become []interface{}.
if v, ok := cl.([]interface{}); ok {
var ret []string
for _, vv := range v {
str, ok := vv.(string)
if !ok {
return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
}
ret = append(ret, str)
}
return ret, true, nil
}
return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
}
func (c Claims) Int64Claim(name string) (int64, bool, error) {
cl, ok := c[name]
if !ok {
return 0, false, nil
}
v, ok := cl.(int64)
if !ok {
vf, ok := cl.(float64)
if !ok {
return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name)
}
v = int64(vf)
}
return v, true, nil
}
func (c Claims) Float64Claim(name string) (float64, bool, error) {
cl, ok := c[name]
if !ok {
return 0, false, nil
}
v, ok := cl.(float64)
if !ok {
vi, ok := cl.(int64)
if !ok {
return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
}
v = float64(vi)
}
return v, true, nil
}
func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
v, ok, err := c.Float64Claim(name)
if !ok || err != nil {
return time.Time{}, ok, err
}
s := math.Trunc(v)
ns := (v - s) * math.Pow(10, 9)
return time.Unix(int64(s), int64(ns)).UTC(), true, nil
}
func decodeClaims(payload []byte) (Claims, error) {
var c Claims
if err := json.Unmarshal(payload, &c); err != nil {
return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err)
}
return c, nil
}
func marshalClaims(c Claims) ([]byte, error) {
b, err := json.Marshal(c)
if err != nil {
return nil, err
}
return b, nil
}
func encodeClaims(c Claims) (string, error) {
b, err := marshalClaims(c)
if err != nil {
return "", err
}
return encodeSegment(b), nil
}
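A short sketch of working with Claims; the claim names are the usual JWT registered claims and the import path is assumed from this vendor tree:

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	c := jose.Claims{}
	c.Add("sub", "user-123")
	c.Add("exp", float64(time.Now().Add(time.Hour).Unix()))

	sub, ok, err := c.StringClaim("sub")
	fmt.Println(sub, ok, err)

	// TimeClaim reads numeric claims as Unix seconds (fractions become
	// nanoseconds) and returns a UTC time.Time.
	exp, ok, err := c.TimeClaim("exp")
	fmt.Println(exp, ok, err)
}
```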

View File

@ -0,0 +1,112 @@
package jose
import (
"encoding/base64"
"encoding/json"
"fmt"
"strings"
)
const (
HeaderMediaType = "typ"
HeaderKeyAlgorithm = "alg"
HeaderKeyID = "kid"
)
const (
// Encryption Algorithm Header Parameter Values for JWS
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
AlgHS256 = "HS256"
AlgHS384 = "HS384"
AlgHS512 = "HS512"
AlgRS256 = "RS256"
AlgRS384 = "RS384"
AlgRS512 = "RS512"
AlgES256 = "ES256"
AlgES384 = "ES384"
AlgES512 = "ES512"
AlgPS256 = "PS256"
AlgPS384 = "PS384"
AlgPS512 = "PS512"
AlgNone = "none"
)
const (
// Algorithm Header Parameter Values for JWE
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
AlgRSA15 = "RSA1_5"
AlgRSAOAEP = "RSA-OAEP"
AlgRSAOAEP256 = "RSA-OAEP-256"
AlgA128KW = "A128KW"
AlgA192KW = "A192KW"
AlgA256KW = "A256KW"
AlgDir = "dir"
AlgECDHES = "ECDH-ES"
AlgECDHESA128KW = "ECDH-ES+A128KW"
AlgECDHESA192KW = "ECDH-ES+A192KW"
AlgECDHESA256KW = "ECDH-ES+A256KW"
AlgA128GCMKW = "A128GCMKW"
AlgA192GCMKW = "A192GCMKW"
AlgA256GCMKW = "A256GCMKW"
AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
)
const (
// Encryption Algorithm Header Parameter Values for JWE
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
EncA128CBCHS256 = "A128CBC-HS256"
EncA128CBCHS384 = "A128CBC-HS384"
EncA256CBCHS512 = "A256CBC-HS512"
EncA128GCM = "A128GCM"
EncA192GCM = "A192GCM"
EncA256GCM = "A256GCM"
)
type JOSEHeader map[string]string
func (j JOSEHeader) Validate() error {
if _, exists := j[HeaderKeyAlgorithm]; !exists {
return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm)
}
return nil
}
func decodeHeader(seg string) (JOSEHeader, error) {
b, err := decodeSegment(seg)
if err != nil {
return nil, err
}
var h JOSEHeader
err = json.Unmarshal(b, &h)
if err != nil {
return nil, err
}
return h, nil
}
func encodeHeader(h JOSEHeader) (string, error) {
b, err := json.Marshal(h)
if err != nil {
return "", err
}
return encodeSegment(b), nil
}
// Decode JWT specific base64url encoding with padding stripped
func decodeSegment(seg string) ([]byte, error) {
if l := len(seg) % 4; l != 0 {
seg += strings.Repeat("=", 4-l)
}
return base64.URLEncoding.DecodeString(seg)
}
// Encode JWT specific base64url encoding with padding stripped
func encodeSegment(seg []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}
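// A minimal sketch of the same unpadded base64url encoding that the segment
// helpers above implement; on Go 1.5+ the standard library's
// base64.RawURLEncoding provides this behavior directly.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := []byte(`{"alg":"RS256","typ":"JWT"}`)
	seg := base64.RawURLEncoding.EncodeToString(raw) // no '=' padding, as in encodeSegment
	back, err := base64.RawURLEncoding.DecodeString(seg)
	if err != nil {
		panic(err)
	}
	fmt.Println(seg)
	fmt.Println(string(back))
}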

View File

@ -0,0 +1,135 @@
package jose
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/json"
"math/big"
"strings"
)
// JSON Web Key
// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5
type JWK struct {
ID string
Type string
Alg string
Use string
Exponent int
Modulus *big.Int
Secret []byte
}
type jwkJSON struct {
ID string `json:"kid"`
Type string `json:"kty"`
Alg string `json:"alg"`
Use string `json:"use"`
Exponent string `json:"e"`
Modulus string `json:"n"`
}
func (j *JWK) MarshalJSON() ([]byte, error) {
t := jwkJSON{
ID: j.ID,
Type: j.Type,
Alg: j.Alg,
Use: j.Use,
Exponent: encodeExponent(j.Exponent),
Modulus: encodeModulus(j.Modulus),
}
return json.Marshal(&t)
}
func (j *JWK) UnmarshalJSON(data []byte) error {
var t jwkJSON
err := json.Unmarshal(data, &t)
if err != nil {
return err
}
e, err := decodeExponent(t.Exponent)
if err != nil {
return err
}
n, err := decodeModulus(t.Modulus)
if err != nil {
return err
}
j.ID = t.ID
j.Type = t.Type
j.Alg = t.Alg
j.Use = t.Use
j.Exponent = e
j.Modulus = n
return nil
}
type JWKSet struct {
Keys []JWK `json:"keys"`
}
func decodeExponent(e string) (int, error) {
decE, err := decodeBase64URLPaddingOptional(e)
if err != nil {
return 0, err
}
var eBytes []byte
if len(decE) < 8 {
eBytes = make([]byte, 8-len(decE), 8)
eBytes = append(eBytes, decE...)
} else {
eBytes = decE
}
eReader := bytes.NewReader(eBytes)
var E uint64
err = binary.Read(eReader, binary.BigEndian, &E)
if err != nil {
return 0, err
}
return int(E), nil
}
func encodeExponent(e int) string {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(e))
var idx int
for ; idx < 8; idx++ {
if b[idx] != 0x0 {
break
}
}
return base64.URLEncoding.EncodeToString(b[idx:])
}
// Turns a URL encoded modulus of a key into a big int.
func decodeModulus(n string) (*big.Int, error) {
decN, err := decodeBase64URLPaddingOptional(n)
if err != nil {
return nil, err
}
N := big.NewInt(0)
N.SetBytes(decN)
return N, nil
}
func encodeModulus(n *big.Int) string {
return base64.URLEncoding.EncodeToString(n.Bytes())
}
// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
// The stdlib version currently doesn't handle this.
// We can get rid of this if this bug:
// https://github.com/golang/go/issues/4237
// ever closes.
func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
if m := len(e) % 4; m != 0 {
e += strings.Repeat("=", 4-m)
}
return base64.URLEncoding.DecodeString(e)
}
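// A minimal sketch of decoding a JWK Set document into the types above; the
// key material shown is a made-up placeholder, not a real key.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	doc := []byte(`{"keys":[{"kid":"key-1","kty":"RSA","alg":"RS256","use":"sig","e":"AQAB","n":"sXchDaQebHnPiGvyDOAT4saGEUetSyo9MKLOoWFs"}]}`)
	var set jose.JWKSet
	if err := json.Unmarshal(doc, &set); err != nil {
		log.Fatal(err)
	}
	k := set.Keys[0]
	fmt.Println(k.ID, k.Type, k.Use, k.Exponent) // key-1 RSA sig 65537
	fmt.Println(k.Modulus.BitLen(), "bit modulus (placeholder value)")
}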

View File

@ -0,0 +1,51 @@
package jose
import (
"fmt"
"strings"
)
type JWS struct {
RawHeader string
Header JOSEHeader
RawPayload string
Payload []byte
Signature []byte
}
// ParseJWS parses a raw encoded JWS token and verifies its structure.
func ParseJWS(raw string) (JWS, error) {
parts := strings.Split(raw, ".")
if len(parts) != 3 {
return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
}
rawSig := parts[2]
jws := JWS{
RawHeader: parts[0],
RawPayload: parts[1],
}
header, err := decodeHeader(jws.RawHeader)
if err != nil {
return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
}
if err = header.Validate(); err != nil {
return JWS{}, fmt.Errorf("malformed JWS, %s", err)
}
jws.Header = header
payload, err := decodeSegment(jws.RawPayload)
if err != nil {
return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
}
jws.Payload = payload
sig, err := decodeSegment(rawSig)
if err != nil {
return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
}
jws.Signature = sig
return jws, nil
}
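// A minimal sketch of ParseJWS over a structurally valid token; the signature
// segment here is arbitrary bytes, so only the parsing step is demonstrated.
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	enc := base64.RawURLEncoding.EncodeToString
	token := enc([]byte(`{"alg":"HS256","typ":"JWT"}`)) + "." +
		enc([]byte(`{"sub":"248289761001"}`)) + "." +
		enc([]byte("not-a-real-signature"))
	jws, err := jose.ParseJWS(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(jws.Header["alg"], string(jws.Payload))
}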

View File

@ -0,0 +1,82 @@
package jose
import "strings"
type JWT JWS
func ParseJWT(token string) (jwt JWT, err error) {
jws, err := ParseJWS(token)
if err != nil {
return
}
return JWT(jws), nil
}
func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
jwt = JWT{}
jwt.Header = header
jwt.Header[HeaderMediaType] = "JWT"
claimBytes, err := marshalClaims(claims)
if err != nil {
return
}
jwt.Payload = claimBytes
eh, err := encodeHeader(header)
if err != nil {
return
}
jwt.RawHeader = eh
ec, err := encodeClaims(claims)
if err != nil {
return
}
jwt.RawPayload = ec
return
}
func (j *JWT) KeyID() (string, bool) {
kID, ok := j.Header[HeaderKeyID]
return kID, ok
}
func (j *JWT) Claims() (Claims, error) {
return decodeClaims(j.Payload)
}
// Encoded data part of the token which may be signed.
func (j *JWT) Data() string {
return strings.Join([]string{j.RawHeader, j.RawPayload}, ".")
}
// Full encoded JWT token string in format: header.claims.signature
func (j *JWT) Encode() string {
d := j.Data()
s := encodeSegment(j.Signature)
return strings.Join([]string{d, s}, ".")
}
func NewSignedJWT(claims Claims, s Signer) (*JWT, error) {
header := JOSEHeader{
HeaderKeyAlgorithm: s.Alg(),
HeaderKeyID: s.ID(),
}
jwt, err := NewJWT(header, claims)
if err != nil {
return nil, err
}
sig, err := s.Sign([]byte(jwt.Data()))
if err != nil {
return nil, err
}
jwt.Signature = sig
return &jwt, nil
}
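// A minimal sketch of issuing a signed token with NewSignedJWT and the HS256
// signer defined later in this package; the key ID, secret, and claim values
// are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	signer := jose.NewSignerHMAC("key-1", []byte("a-shared-secret"))
	claims := jose.Claims{"iss": "https://issuer.example.com", "sub": "248289761001"}
	jwt, err := jose.NewSignedJWT(claims, signer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(jwt.Encode()) // header.claims.signature
}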

View File

@ -0,0 +1,24 @@
package jose
import (
"fmt"
)
type Verifier interface {
ID() string
Alg() string
Verify(sig []byte, data []byte) error
}
type Signer interface {
Verifier
Sign(data []byte) (sig []byte, err error)
}
func NewVerifier(jwk JWK) (Verifier, error) {
if jwk.Type != "RSA" {
return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
}
return NewVerifierRSA(jwk)
}

View File

@ -0,0 +1,67 @@
package jose
import (
"bytes"
"crypto"
"crypto/hmac"
_ "crypto/sha256"
"errors"
"fmt"
)
type VerifierHMAC struct {
KeyID string
Hash crypto.Hash
Secret []byte
}
type SignerHMAC struct {
VerifierHMAC
}
func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
if jwk.Alg != "" && jwk.Alg != "HS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}
v := VerifierHMAC{
KeyID: jwk.ID,
Secret: jwk.Secret,
Hash: crypto.SHA256,
}
return &v, nil
}
func (v *VerifierHMAC) ID() string {
return v.KeyID
}
func (v *VerifierHMAC) Alg() string {
return "HS256"
}
func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
h := hmac.New(v.Hash.New, v.Secret)
h.Write(data)
if !bytes.Equal(sig, h.Sum(nil)) {
return errors.New("invalid hmac signature")
}
return nil
}
func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
return &SignerHMAC{
VerifierHMAC: VerifierHMAC{
KeyID: kid,
Secret: secret,
Hash: crypto.SHA256,
},
}
}
func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
h := hmac.New(s.Hash.New, s.Secret)
h.Write(data)
return h.Sum(nil), nil
}
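// A minimal sketch of an HS256 sign/verify round trip with the types above;
// the key ID and secret are placeholders.
package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	secret := []byte("a-shared-secret")
	signer := jose.NewSignerHMAC("key-1", secret)
	data := []byte("header-segment.payload-segment")
	sig, err := signer.Sign(data)
	if err != nil {
		panic(err)
	}
	verifier, err := jose.NewVerifierHMAC(jose.JWK{ID: "key-1", Secret: secret})
	if err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verify(sig, data))               // <nil>: signature matches
	fmt.Println(verifier.Verify(sig, []byte("tampered"))) // invalid hmac signature
}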

View File

@ -0,0 +1,67 @@
package jose
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"fmt"
)
type VerifierRSA struct {
KeyID string
Hash crypto.Hash
PublicKey rsa.PublicKey
}
type SignerRSA struct {
PrivateKey rsa.PrivateKey
VerifierRSA
}
func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
if jwk.Alg != "" && jwk.Alg != "RS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}
v := VerifierRSA{
KeyID: jwk.ID,
PublicKey: rsa.PublicKey{
N: jwk.Modulus,
E: jwk.Exponent,
},
Hash: crypto.SHA256,
}
return &v, nil
}
func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA {
return &SignerRSA{
PrivateKey: key,
VerifierRSA: VerifierRSA{
KeyID: kid,
PublicKey: key.PublicKey,
Hash: crypto.SHA256,
},
}
}
func (v *VerifierRSA) ID() string {
return v.KeyID
}
func (v *VerifierRSA) Alg() string {
return "RS256"
}
func (v *VerifierRSA) Verify(sig []byte, data []byte) error {
h := v.Hash.New()
h.Write(data)
return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig)
}
func (s *SignerRSA) Sign(data []byte) ([]byte, error) {
h := s.Hash.New()
h.Write(data)
return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil))
}
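// A minimal sketch of RS256 signing and verification with a freshly generated,
// throwaway RSA key.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signer := jose.NewSignerRSA("key-1", *priv)
	data := []byte("header-segment.payload-segment")
	sig, err := signer.Sign(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.Verify(sig, data)) // <nil>: RS256 signature verifies
}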

View File

@ -0,0 +1,153 @@
package key
import (
"crypto/rand"
"crypto/rsa"
"encoding/hex"
"encoding/json"
"io"
"time"
"github.com/coreos/go-oidc/jose"
)
func NewPublicKey(jwk jose.JWK) *PublicKey {
return &PublicKey{jwk: jwk}
}
type PublicKey struct {
jwk jose.JWK
}
func (k *PublicKey) MarshalJSON() ([]byte, error) {
return json.Marshal(&k.jwk)
}
func (k *PublicKey) UnmarshalJSON(data []byte) error {
var jwk jose.JWK
if err := json.Unmarshal(data, &jwk); err != nil {
return err
}
k.jwk = jwk
return nil
}
func (k *PublicKey) ID() string {
return k.jwk.ID
}
func (k *PublicKey) Verifier() (jose.Verifier, error) {
return jose.NewVerifierRSA(k.jwk)
}
type PrivateKey struct {
KeyID string
PrivateKey *rsa.PrivateKey
}
func (k *PrivateKey) ID() string {
return k.KeyID
}
func (k *PrivateKey) Signer() jose.Signer {
return jose.NewSignerRSA(k.ID(), *k.PrivateKey)
}
func (k *PrivateKey) JWK() jose.JWK {
return jose.JWK{
ID: k.KeyID,
Type: "RSA",
Alg: "RS256",
Use: "sig",
Exponent: k.PrivateKey.PublicKey.E,
Modulus: k.PrivateKey.PublicKey.N,
}
}
type KeySet interface {
ExpiresAt() time.Time
}
type PublicKeySet struct {
keys []PublicKey
index map[string]*PublicKey
expiresAt time.Time
}
func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet {
keys := make([]PublicKey, len(jwks))
index := make(map[string]*PublicKey)
for i, jwk := range jwks {
keys[i] = *NewPublicKey(jwk)
index[keys[i].ID()] = &keys[i]
}
return &PublicKeySet{
keys: keys,
index: index,
expiresAt: exp,
}
}
func (s *PublicKeySet) ExpiresAt() time.Time {
return s.expiresAt
}
func (s *PublicKeySet) Keys() []PublicKey {
return s.keys
}
func (s *PublicKeySet) Key(id string) *PublicKey {
return s.index[id]
}
type PrivateKeySet struct {
keys []*PrivateKey
ActiveKeyID string
expiresAt time.Time
}
func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet {
return &PrivateKeySet{
keys: keys,
ActiveKeyID: keys[0].ID(),
expiresAt: exp.UTC(),
}
}
func (s *PrivateKeySet) Keys() []*PrivateKey {
return s.keys
}
func (s *PrivateKeySet) ExpiresAt() time.Time {
return s.expiresAt
}
func (s *PrivateKeySet) Active() *PrivateKey {
for i, k := range s.keys {
if k.ID() == s.ActiveKeyID {
return s.keys[i]
}
}
return nil
}
type GeneratePrivateKeyFunc func() (*PrivateKey, error)
func GeneratePrivateKey() (*PrivateKey, error) {
pk, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, err
}
keyID := make([]byte, 20)
if _, err := io.ReadFull(rand.Reader, keyID); err != nil {
return nil, err
}
k := PrivateKey{
KeyID: hex.EncodeToString(keyID),
PrivateKey: pk,
}
return &k, nil
}
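// A minimal sketch of generating a signing key, wrapping it in a PrivateKeySet,
// and deriving the public JWK that a provider would publish; the TTL is an
// arbitrary placeholder.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	priv, err := key.GeneratePrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	set := key.NewPrivateKeySet([]*key.PrivateKey{priv}, time.Now().Add(24*time.Hour))
	active := set.Active()
	fmt.Println("active key:", active.ID())
	fmt.Println("published JWK alg:", active.JWK().Alg)
}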

View File

@ -0,0 +1,99 @@
package key
import (
"errors"
"time"
"github.com/jonboulle/clockwork"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/pkg/health"
)
type PrivateKeyManager interface {
ExpiresAt() time.Time
Signer() (jose.Signer, error)
JWKs() ([]jose.JWK, error)
PublicKeys() ([]PublicKey, error)
WritableKeySetRepo
health.Checkable
}
func NewPrivateKeyManager() PrivateKeyManager {
return &privateKeyManager{
clock: clockwork.NewRealClock(),
}
}
type privateKeyManager struct {
keySet *PrivateKeySet
clock clockwork.Clock
}
func (m *privateKeyManager) ExpiresAt() time.Time {
if m.keySet == nil {
return m.clock.Now().UTC()
}
return m.keySet.ExpiresAt()
}
func (m *privateKeyManager) Signer() (jose.Signer, error) {
if err := m.Healthy(); err != nil {
return nil, err
}
return m.keySet.Active().Signer(), nil
}
func (m *privateKeyManager) JWKs() ([]jose.JWK, error) {
if err := m.Healthy(); err != nil {
return nil, err
}
keys := m.keySet.Keys()
jwks := make([]jose.JWK, len(keys))
for i, k := range keys {
jwks[i] = k.JWK()
}
return jwks, nil
}
func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) {
jwks, err := m.JWKs()
if err != nil {
return nil, err
}
keys := make([]PublicKey, len(jwks))
for i, jwk := range jwks {
keys[i] = *NewPublicKey(jwk)
}
return keys, nil
}
func (m *privateKeyManager) Healthy() error {
if m.keySet == nil {
return errors.New("private key manager uninitialized")
}
if len(m.keySet.Keys()) == 0 {
return errors.New("private key manager zero keys")
}
if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) {
return errors.New("private key manager keys expired")
}
return nil
}
func (m *privateKeyManager) Set(keySet KeySet) error {
privKeySet, ok := keySet.(*PrivateKeySet)
if !ok {
return errors.New("unable to cast to PrivateKeySet")
}
m.keySet = privKeySet
return nil
}

View File

@ -0,0 +1,55 @@
package key
import (
"errors"
"sync"
)
var ErrorNoKeys = errors.New("no keys found")
type WritableKeySetRepo interface {
Set(KeySet) error
}
type ReadableKeySetRepo interface {
Get() (KeySet, error)
}
type PrivateKeySetRepo interface {
WritableKeySetRepo
ReadableKeySetRepo
}
func NewPrivateKeySetRepo() PrivateKeySetRepo {
return &memPrivateKeySetRepo{}
}
type memPrivateKeySetRepo struct {
mu sync.RWMutex
pks PrivateKeySet
}
func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
pks, ok := ks.(*PrivateKeySet)
if !ok {
return errors.New("unable to cast to PrivateKeySet")
} else if pks == nil {
return errors.New("nil KeySet")
}
r.mu.Lock()
defer r.mu.Unlock()
r.pks = *pks
return nil
}
func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
r.mu.RLock()
defer r.mu.RUnlock()
if r.pks.keys == nil {
return nil, ErrorNoKeys
}
return KeySet(&r.pks), nil
}

View File

@ -0,0 +1,159 @@
package key
import (
"errors"
"log"
"time"
ptime "github.com/coreos/pkg/timeutil"
"github.com/jonboulle/clockwork"
)
var (
ErrorPrivateKeysExpired = errors.New("private keys have expired")
)
func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
return &PrivateKeyRotator{
repo: repo,
ttl: ttl,
keep: 2,
generateKey: GeneratePrivateKey,
clock: clockwork.NewRealClock(),
}
}
type PrivateKeyRotator struct {
repo PrivateKeySetRepo
generateKey GeneratePrivateKeyFunc
clock clockwork.Clock
keep int
ttl time.Duration
}
func (r *PrivateKeyRotator) expiresAt() time.Time {
return r.clock.Now().UTC().Add(r.ttl)
}
func (r *PrivateKeyRotator) Healthy() error {
pks, err := r.privateKeySet()
if err != nil {
return err
}
if r.clock.Now().After(pks.ExpiresAt()) {
return ErrorPrivateKeysExpired
}
return nil
}
func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
ks, err := r.repo.Get()
if err != nil {
return nil, err
}
pks, ok := ks.(*PrivateKeySet)
if !ok {
return nil, errors.New("unable to cast to PrivateKeySet")
}
return pks, nil
}
func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
pks, err := r.privateKeySet()
if err == ErrorNoKeys {
return 0, nil
}
if err != nil {
return 0, err
}
now := r.clock.Now()
// Ideally, we want to rotate after half the TTL has elapsed.
idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)
// If we are past the ideal rotation time, rotate immediately.
return max(0, idealRotationTime.Sub(now)), nil
}
func max(a, b time.Duration) time.Duration {
if a > b {
return a
}
return b
}
func (r *PrivateKeyRotator) Run() chan struct{} {
attempt := func() {
k, err := r.generateKey()
if err != nil {
log.Printf("go-oidc: failed generating signing key: %v", err)
return
}
exp := r.expiresAt()
if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil {
log.Printf("go-oidc: key rotation failed: %v", err)
return
}
}
stop := make(chan struct{})
go func() {
for {
var nextRotation time.Duration
var sleep time.Duration
var err error
for {
if nextRotation, err = r.nextRotation(); err == nil {
break
}
sleep = ptime.ExpBackoff(sleep, time.Minute)
log.Printf("go-oidc: error getting nextRotation, retrying in %v: %v", sleep, err)
time.Sleep(sleep)
}
select {
case <-r.clock.After(nextRotation):
attempt()
case <-stop:
return
}
}
}()
return stop
}
func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error {
ks, err := repo.Get()
if err != nil && err != ErrorNoKeys {
return err
}
var keys []*PrivateKey
if ks != nil {
pks, ok := ks.(*PrivateKeySet)
if !ok {
return errors.New("unable to cast to PrivateKeySet")
}
keys = pks.Keys()
}
keys = append([]*PrivateKey{k}, keys...)
if l := len(keys); l > keep {
keys = keys[0:keep]
}
nks := PrivateKeySet{
keys: keys,
ActiveKeyID: k.ID(),
expiresAt: exp,
}
return repo.Set(KeySet(&nks))
}
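// A minimal sketch of running the rotator against the in-memory repo defined
// earlier in this package; the TTL and the polling loop are placeholders for
// demonstration only.
package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	repo := key.NewPrivateKeySetRepo()
	rotator := key.NewPrivateKeyRotator(repo, 6*time.Hour)
	stop := rotator.Run() // generates a key almost immediately, then rotates around TTL/2
	defer close(stop)

	// Poll briefly for the first generated key; generation happens in the background.
	for i := 0; i < 50; i++ {
		if ks, err := repo.Get(); err == nil {
			fmt.Println("key set expires at:", ks.ExpiresAt())
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("no key generated yet")
}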

View File

@ -0,0 +1,91 @@
package key
import (
"errors"
"log"
"time"
"github.com/jonboulle/clockwork"
"github.com/coreos/pkg/timeutil"
)
func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
return &KeySetSyncer{
readable: r,
writable: w,
clock: clockwork.NewRealClock(),
}
}
type KeySetSyncer struct {
readable ReadableKeySetRepo
writable WritableKeySetRepo
clock clockwork.Clock
}
func (s *KeySetSyncer) Run() chan struct{} {
stop := make(chan struct{})
go func() {
var failing bool
var next time.Duration
for {
exp, err := syncKeySet(s.readable, s.writable, s.clock)
if err != nil || exp == 0 {
if !failing {
failing = true
next = time.Second
} else {
next = timeutil.ExpBackoff(next, time.Minute)
}
if exp == 0 {
log.Printf("Synced to already expired key set, retrying in %v: %v", next, err)
} else {
log.Printf("Failed syncing key set, retrying in %v: %v", next, err)
}
} else {
failing = false
next = exp / 2
}
select {
case <-s.clock.After(next):
continue
case <-stop:
return
}
}
}()
return stop
}
func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
return syncKeySet(r, w, clockwork.NewRealClock())
}
// syncKeySet copies the KeySet from r to w and returns the duration until the KeySet expires.
// If the KeySet has already expired, it returns a zero duration.
func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
var ks KeySet
ks, err = r.Get()
if err != nil {
return
}
if ks == nil {
err = errors.New("no source KeySet")
return
}
if err = w.Set(ks); err != nil {
return
}
now := clock.Now()
if ks.ExpiresAt().After(now) {
exp = ks.ExpiresAt().Sub(now)
}
return
}

View File

@ -0,0 +1,29 @@
package oauth2
const (
ErrorAccessDenied = "access_denied"
ErrorInvalidClient = "invalid_client"
ErrorInvalidGrant = "invalid_grant"
ErrorInvalidRequest = "invalid_request"
ErrorServerError = "server_error"
ErrorUnauthorizedClient = "unauthorized_client"
ErrorUnsupportedGrantType = "unsupported_grant_type"
ErrorUnsupportedResponseType = "unsupported_response_type"
)
type Error struct {
Type string `json:"error"`
Description string `json:"error_description,omitempty"`
State string `json:"state,omitempty"`
}
func (e *Error) Error() string {
if e.Description != "" {
return e.Type + ": " + e.Description
}
return e.Type
}
func NewError(typ string) *Error {
return &Error{Type: typ}
}

View File

@ -0,0 +1,416 @@
package oauth2
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"mime"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
phttp "github.com/coreos/go-oidc/http"
)
// ResponseTypesEqual compares two response_type values. If either
// contains a space, it is treated as an unordered list. For example,
// comparing "code id_token" and "id_token code" would evaluate to true.
func ResponseTypesEqual(r1, r2 string) bool {
if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
// fast route, no split needed
return r1 == r2
}
// split, sort, and compare
r1Fields := strings.Fields(r1)
r2Fields := strings.Fields(r2)
if len(r1Fields) != len(r2Fields) {
return false
}
sort.Strings(r1Fields)
sort.Strings(r2Fields)
for i, r1Field := range r1Fields {
if r1Field != r2Fields[i] {
return false
}
}
return true
}
const (
// OAuth2.0 response types registered by OIDC.
//
// See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
ResponseTypeCode = "code"
ResponseTypeCodeIDToken = "code id_token"
ResponseTypeCodeIDTokenToken = "code id_token token"
ResponseTypeIDToken = "id_token"
ResponseTypeIDTokenToken = "id_token token"
ResponseTypeToken = "token"
ResponseTypeNone = "none"
)
const (
GrantTypeAuthCode = "authorization_code"
GrantTypeClientCreds = "client_credentials"
GrantTypeUserCreds = "password"
GrantTypeImplicit = "implicit"
GrantTypeRefreshToken = "refresh_token"
AuthMethodClientSecretPost = "client_secret_post"
AuthMethodClientSecretBasic = "client_secret_basic"
AuthMethodClientSecretJWT = "client_secret_jwt"
AuthMethodPrivateKeyJWT = "private_key_jwt"
)
type Config struct {
Credentials ClientCredentials
Scope []string
RedirectURL string
AuthURL string
TokenURL string
// Must be one of the AuthMethodXXX methods above. Right now, only
// AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported.
AuthMethod string
}
type Client struct {
hc phttp.Client
creds ClientCredentials
scope []string
authURL *url.URL
redirectURL *url.URL
tokenURL *url.URL
authMethod string
}
type ClientCredentials struct {
ID string
Secret string
}
func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
if len(cfg.Credentials.ID) == 0 {
err = errors.New("missing client id")
return
}
if len(cfg.Credentials.Secret) == 0 {
err = errors.New("missing client secret")
return
}
if cfg.AuthMethod == "" {
cfg.AuthMethod = AuthMethodClientSecretBasic
} else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic {
err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod)
return
}
au, err := phttp.ParseNonEmptyURL(cfg.AuthURL)
if err != nil {
return
}
tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL)
if err != nil {
return
}
// Allow empty redirect URL in the case where the client
// only needs to verify a given token.
ru, err := url.Parse(cfg.RedirectURL)
if err != nil {
return
}
c = &Client{
creds: cfg.Credentials,
scope: cfg.Scope,
redirectURL: ru,
authURL: au,
tokenURL: tu,
hc: hc,
authMethod: cfg.AuthMethod,
}
return
}
// HttpClient returns the embedded HTTP client.
func (c *Client) HttpClient() phttp.Client {
return c.hc
}
// AuthCodeURL generates the URL for the initial redirect to the OAuth provider.
func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
v := c.commonURLValues()
v.Set("state", state)
if strings.ToLower(accessType) == "offline" {
v.Set("access_type", "offline")
}
if prompt != "" {
v.Set("prompt", prompt)
}
v.Set("response_type", "code")
q := v.Encode()
u := *c.authURL
if u.RawQuery == "" {
u.RawQuery = q
} else {
u.RawQuery += "&" + q
}
return u.String()
}
func (c *Client) commonURLValues() url.Values {
return url.Values{
"redirect_uri": {c.redirectURL.String()},
"scope": {strings.Join(c.scope, " ")},
"client_id": {c.creds.ID},
}
}
func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
var req *http.Request
var err error
switch c.authMethod {
case AuthMethodClientSecretPost:
values.Set("client_secret", c.creds.Secret)
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
case AuthMethodClientSecretBasic:
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
encodedID := url.QueryEscape(c.creds.ID)
encodedSecret := url.QueryEscape(c.creds.Secret)
req.SetBasicAuth(encodedID, encodedSecret)
default:
panic("misconfigured client: auth method not supported")
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req, nil
}
// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type.
// May not be supported by all OAuth2 servers.
func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) {
v := url.Values{
"scope": {strings.Join(scope, " ")},
"grant_type": {GrantTypeClientCreds},
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type.
// May not be supported by all OAuth2 servers.
func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
v := url.Values{
"scope": {strings.Join(c.scope, " ")},
"grant_type": {GrantTypeUserCreds},
"username": {username},
"password": {password},
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
// RequestToken requests a token from the Token Endpoint with the specified grantType.
// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.
func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) {
v := c.commonURLValues()
v.Set("grant_type", grantType)
v.Set("client_secret", c.creds.Secret)
switch grantType {
case GrantTypeAuthCode:
v.Set("code", value)
case GrantTypeRefreshToken:
v.Set("refresh_token", value)
default:
err = fmt.Errorf("unsupported grant_type: %v", grantType)
return
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299
contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
if err != nil {
return
}
result = TokenResponse{
RawBody: body,
}
newError := func(typ, desc, state string) error {
if typ == "" {
return fmt.Errorf("unrecognized error %s", body)
}
return &Error{typ, desc, state}
}
if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" {
var vals url.Values
vals, err = url.ParseQuery(string(body))
if err != nil {
return
}
if error := vals.Get("error"); error != "" || badStatusCode {
err = newError(error, vals.Get("error_description"), vals.Get("state"))
return
}
e := vals.Get("expires_in")
if e == "" {
e = vals.Get("expires")
}
if e != "" {
result.Expires, err = strconv.Atoi(e)
if err != nil {
return
}
}
result.AccessToken = vals.Get("access_token")
result.TokenType = vals.Get("token_type")
result.IDToken = vals.Get("id_token")
result.RefreshToken = vals.Get("refresh_token")
result.Scope = vals.Get("scope")
} else {
var r struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
RefreshToken string `json:"refresh_token"`
Scope string `json:"scope"`
State string `json:"state"`
ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string
Expires int `json:"expires"`
Error string `json:"error"`
Desc string `json:"error_description"`
}
if err = json.Unmarshal(body, &r); err != nil {
return
}
if r.Error != "" || badStatusCode {
err = newError(r.Error, r.Desc, r.State)
return
}
result.AccessToken = r.AccessToken
result.TokenType = r.TokenType
result.IDToken = r.IDToken
result.RefreshToken = r.RefreshToken
result.Scope = r.Scope
if expiresIn, err := r.ExpiresIn.Int64(); err != nil {
result.Expires = r.Expires
} else {
result.Expires = int(expiresIn)
}
}
return
}
type TokenResponse struct {
AccessToken string
TokenType string
Expires int
IDToken string
RefreshToken string // OPTIONAL.
Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED.
RawBody []byte // In case callers need some other non-standard info from the token response
}
type AuthCodeRequest struct {
ResponseType string
ClientID string
RedirectURL *url.URL
Scope []string
State string
}
func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) {
acr := AuthCodeRequest{
ResponseType: q.Get("response_type"),
ClientID: q.Get("client_id"),
State: q.Get("state"),
Scope: make([]string, 0),
}
qs := strings.TrimSpace(q.Get("scope"))
if qs != "" {
acr.Scope = strings.Split(qs, " ")
}
err := func() error {
if acr.ClientID == "" {
return NewError(ErrorInvalidRequest)
}
redirectURL := q.Get("redirect_uri")
if redirectURL != "" {
ru, err := url.Parse(redirectURL)
if err != nil {
return NewError(ErrorInvalidRequest)
}
acr.RedirectURL = ru
}
return nil
}()
return acr, err
}
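// A minimal sketch of constructing the OAuth2 client above and building an
// authorization redirect URL; the endpoints, credentials, state, and prompt
// values are placeholders.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	cfg := oauth2.Config{
		Credentials: oauth2.ClientCredentials{ID: "example-client", Secret: "example-secret"},
		Scope:       []string{"openid", "email"},
		RedirectURL: "https://app.example.com/callback",
		AuthURL:     "https://provider.example.com/authorize",
		TokenURL:    "https://provider.example.com/token",
	}
	client, err := oauth2.NewClient(http.DefaultClient, cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(client.AuthCodeURL("some-state", "offline", "consent"))
}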

View File

@ -0,0 +1,846 @@
package oidc
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/mail"
"net/url"
"sync"
"time"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/key"
"github.com/coreos/go-oidc/oauth2"
)
const (
// amount of time that must pass after the last key sync
// completes before another attempt may begin
keySyncWindow = 5 * time.Second
)
var (
DefaultScope = []string{"openid", "email", "profile"}
supportedAuthMethods = map[string]struct{}{
oauth2.AuthMethodClientSecretBasic: struct{}{},
oauth2.AuthMethodClientSecretPost: struct{}{},
}
)
type ClientCredentials oauth2.ClientCredentials
type ClientIdentity struct {
Credentials ClientCredentials
Metadata ClientMetadata
}
type JWAOptions struct {
// SigningAlg specifies a JWA alg for signing JWTs.
//
// Specifying this field implies different actions depending on the context. It may
// require objects be serialized and signed as a JWT instead of plain JSON, or
// require an existing JWT object use the specified alg.
//
// See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
SigningAlg string
// EncryptionAlg, if provided, specifies that the returned or sent object be stored
// (or nested) within a JWT object and encrypted with the provided JWA alg.
EncryptionAlg string
// EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
// EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
// to A128CBC-HS256.
//
// If EncryptionEnc is provided EncryptionAlg must also be specified.
EncryptionEnc string
}
func (opt JWAOptions) valid() error {
if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
return errors.New("encryption encoding provided with no encryption algorithm")
}
return nil
}
func (opt JWAOptions) defaults() JWAOptions {
if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
opt.EncryptionEnc = jose.EncA128CBCHS256
}
return opt
}
var (
// Ensure ClientMetadata satisfies these interfaces.
_ json.Marshaler = &ClientMetadata{}
_ json.Unmarshaler = &ClientMetadata{}
)
// ClientMetadata holds metadata that the authorization server associates
// with a client identifier. The fields range from human-facing display
// strings such as client name, to items that impact the security of the
// protocol, such as the list of valid redirect URIs.
//
// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
//
// TODO: support language specific claim representations
// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
type ClientMetadata struct {
RedirectURIs []url.URL // Required
// A list of OAuth 2.0 "response_type" values that the client wishes to restrict
// itself to. Either "code", "token", or another registered extension.
//
// If omitted, only "code" will be used.
ResponseTypes []string
// A list of OAuth 2.0 grant types the client wishes to restrict itself to.
// The grant type values used by OIDC are "authorization_code", "implicit",
// and "refresh_token".
//
// If omitted, only "authorization_code" will be used.
GrantTypes []string
// "native" or "web". If omitted, "web".
ApplicationType string
// List of email addresses.
Contacts []mail.Address
// Name of client to be presented to the end-user.
ClientName string
// URL that references a logo for the Client application.
LogoURI *url.URL
// URL of the home page of the Client.
ClientURI *url.URL
// Profile data policies and terms of use to be provided to the end user.
PolicyURI *url.URL
TermsOfServiceURI *url.URL
// URL to or the value of the client's JSON Web Key Set document.
JWKSURI *url.URL
JWKS *jose.JWKSet
// URL referencing a file with a single JSON array of redirect URIs.
SectorIdentifierURI *url.URL
SubjectType string
// Options to restrict the JWS alg and enc values used for server responses and requests.
IDTokenResponseOptions JWAOptions
UserInfoResponseOptions JWAOptions
RequestObjectOptions JWAOptions
// Client requested authorization method and signing options for the token endpoint.
//
// Defaults to "client_secret_basic"
TokenEndpointAuthMethod string
TokenEndpointAuthSigningAlg string
// DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
// user must reauthorize.
//
// If 0, no limitation is placed on the maximum.
DefaultMaxAge int64
// RequireAuthTime specifies if the auth_time claim in the ID token is required.
RequireAuthTime bool
// Default Authentication Context Class Reference values for authentication requests.
DefaultACRValues []string
// URI that a third party can use to initiate a login by the relying party.
//
// See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
InitiateLoginURI *url.URL
// Pre-registered request_uri values that may be cached by the server.
RequestURIs []url.URL
}
// Defaults returns a shallow copy of ClientMetadata with default
// values replacing omitted fields.
func (m ClientMetadata) Defaults() ClientMetadata {
if len(m.ResponseTypes) == 0 {
m.ResponseTypes = []string{oauth2.ResponseTypeCode}
}
if len(m.GrantTypes) == 0 {
m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
}
if m.ApplicationType == "" {
m.ApplicationType = "web"
}
if m.TokenEndpointAuthMethod == "" {
m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
}
m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
m.RequestObjectOptions = m.RequestObjectOptions.defaults()
return m
}
func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
e := m.toEncodableStruct()
return json.Marshal(&e)
}
func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
var e encodableClientMetadata
if err := json.Unmarshal(data, &e); err != nil {
return err
}
meta, err := e.toStruct()
if err != nil {
return err
}
if err := meta.Valid(); err != nil {
return err
}
*m = meta
return nil
}
type encodableClientMetadata struct {
RedirectURIs []string `json:"redirect_uris"` // Required
ResponseTypes []string `json:"response_types,omitempty"`
GrantTypes []string `json:"grant_types,omitempty"`
ApplicationType string `json:"application_type,omitempty"`
Contacts []string `json:"contacts,omitempty"`
ClientName string `json:"client_name,omitempty"`
LogoURI string `json:"logo_uri,omitempty"`
ClientURI string `json:"client_uri,omitempty"`
PolicyURI string `json:"policy_uri,omitempty"`
TermsOfServiceURI string `json:"tos_uri,omitempty"`
JWKSURI string `json:"jwks_uri,omitempty"`
JWKS *jose.JWKSet `json:"jwks,omitempty"`
SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
SubjectType string `json:"subject_type,omitempty"`
IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
DefaultMaxAge int64 `json:"default_max_age,omitempty"`
RequireAuthTime bool `json:"require_auth_time,omitempty"`
DefaultACRValues []string `json:"default_acr_values,omitempty"`
InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
RequestURIs []string `json:"request_uris,omitempty"`
}
func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
p := stickyErrParser{}
m := ClientMetadata{
RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
ResponseTypes: c.ResponseTypes,
GrantTypes: c.GrantTypes,
ApplicationType: c.ApplicationType,
Contacts: p.parseEmails(c.Contacts, "contacts"),
ClientName: c.ClientName,
LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
ClientURI: p.parseURI(c.ClientURI, "client_uri"),
PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
JWKS: c.JWKS,
SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
SubjectType: c.SubjectType,
TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
DefaultMaxAge: c.DefaultMaxAge,
RequireAuthTime: c.RequireAuthTime,
DefaultACRValues: c.DefaultACRValues,
InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
IDTokenResponseOptions: JWAOptions{
c.IDTokenSignedResponseAlg,
c.IDTokenEncryptedResponseAlg,
c.IDTokenEncryptedResponseEnc,
},
UserInfoResponseOptions: JWAOptions{
c.UserInfoSignedResponseAlg,
c.UserInfoEncryptedResponseAlg,
c.UserInfoEncryptedResponseEnc,
},
RequestObjectOptions: JWAOptions{
c.RequestObjectSigningAlg,
c.RequestObjectEncryptionAlg,
c.RequestObjectEncryptionEnc,
},
}
if p.firstErr != nil {
return ClientMetadata{}, p.firstErr
}
return m, nil
}
// stickyErrParser parses URIs and email addresses. Once it encounters
// a parse error, subsequent calls become no-ops.
type stickyErrParser struct {
firstErr error
}
func (p *stickyErrParser) parseURI(s, field string) *url.URL {
if p.firstErr != nil || s == "" {
return nil
}
u, err := url.Parse(s)
if err == nil {
if u.Host == "" {
err = errors.New("no host in URI")
} else if u.Scheme != "http" && u.Scheme != "https" {
err = errors.New("invalid URI scheme")
}
}
if err != nil {
p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
return nil
}
return u
}
func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
if p.firstErr != nil || len(s) == 0 {
return nil
}
uris := make([]url.URL, len(s))
for i, val := range s {
if val == "" {
p.firstErr = fmt.Errorf("invalid URI in field %s", field)
return nil
}
if u := p.parseURI(val, field); u != nil {
uris[i] = *u
}
}
return uris
}
func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
if p.firstErr != nil || len(s) == 0 {
return nil
}
addrs := make([]mail.Address, len(s))
for i, addr := range s {
if addr == "" {
p.firstErr = fmt.Errorf("invalid email in field %s", field)
return nil
}
a, err := mail.ParseAddress(addr)
if err != nil {
p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
return nil
}
addrs[i] = *a
}
return addrs
}
func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
return encodableClientMetadata{
RedirectURIs: urisToStrings(m.RedirectURIs),
ResponseTypes: m.ResponseTypes,
GrantTypes: m.GrantTypes,
ApplicationType: m.ApplicationType,
Contacts: emailsToStrings(m.Contacts),
ClientName: m.ClientName,
LogoURI: uriToString(m.LogoURI),
ClientURI: uriToString(m.ClientURI),
PolicyURI: uriToString(m.PolicyURI),
TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
JWKSURI: uriToString(m.JWKSURI),
JWKS: m.JWKS,
SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
SubjectType: m.SubjectType,
IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
DefaultMaxAge: m.DefaultMaxAge,
RequireAuthTime: m.RequireAuthTime,
DefaultACRValues: m.DefaultACRValues,
InitiateLoginURI: uriToString(m.InitiateLoginURI),
RequestURIs: urisToStrings(m.RequestURIs),
}
}
func uriToString(u *url.URL) string {
if u == nil {
return ""
}
return u.String()
}
func urisToStrings(urls []url.URL) []string {
if len(urls) == 0 {
return nil
}
sli := make([]string, len(urls))
for i, u := range urls {
sli[i] = u.String()
}
return sli
}
func emailsToStrings(addrs []mail.Address) []string {
if len(addrs) == 0 {
return nil
}
sli := make([]string, len(addrs))
for i, addr := range addrs {
sli[i] = addr.String()
}
return sli
}
// Valid determines if a ClientMetadata conforms with the OIDC specification.
//
// Valid is called by UnmarshalJSON.
//
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
// URL fields where the OIDC spec requires it. This may change in future releases
// of this package. See: https://github.com/coreos/go-oidc/issues/34
func (m *ClientMetadata) Valid() error {
if len(m.RedirectURIs) == 0 {
return errors.New("zero redirect URLs")
}
validURI := func(u *url.URL, fieldName string) error {
if u.Host == "" {
return fmt.Errorf("no host for uri field %s", fieldName)
}
if u.Scheme != "http" && u.Scheme != "https" {
return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
}
return nil
}
uris := []struct {
val *url.URL
name string
}{
{m.LogoURI, "logo_uri"},
{m.ClientURI, "client_uri"},
{m.PolicyURI, "policy_uri"},
{m.TermsOfServiceURI, "tos_uri"},
{m.JWKSURI, "jwks_uri"},
{m.SectorIdentifierURI, "sector_identifier_uri"},
{m.InitiateLoginURI, "initiate_login_uri"},
}
for _, uri := range uris {
if uri.val == nil {
continue
}
if err := validURI(uri.val, uri.name); err != nil {
return err
}
}
uriLists := []struct {
vals []url.URL
name string
}{
{m.RedirectURIs, "redirect_uris"},
{m.RequestURIs, "request_uris"},
}
for _, list := range uriLists {
for _, uri := range list.vals {
if err := validURI(&uri, list.name); err != nil {
return err
}
}
}
options := []struct {
option JWAOptions
name string
}{
{m.IDTokenResponseOptions, "id_token response"},
{m.UserInfoResponseOptions, "userinfo response"},
{m.RequestObjectOptions, "request_object"},
}
for _, option := range options {
if err := option.option.valid(); err != nil {
return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
}
}
return nil
}
type ClientRegistrationResponse struct {
ClientID string // Required
ClientSecret string
RegistrationAccessToken string
RegistrationClientURI string
// If IsZero is true, unspecified.
ClientIDIssuedAt time.Time
// Time at which the client_secret will expire.
// If IsZero is true, it will not expire.
ClientSecretExpiresAt time.Time
ClientMetadata
}
type encodableClientRegistrationResponse struct {
ClientID string `json:"client_id"` // Required
ClientSecret string `json:"client_secret,omitempty"`
RegistrationAccessToken string `json:"registration_access_token,omitempty"`
RegistrationClientURI string `json:"registration_client_uri,omitempty"`
ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
// Time at which the client_secret will expire, in seconds since the epoch.
// If 0 it will not expire.
ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
encodableClientMetadata
}
func unixToSec(t time.Time) int64 {
if t.IsZero() {
return 0
}
return t.Unix()
}
func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
e := encodableClientRegistrationResponse{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RegistrationAccessToken: c.RegistrationAccessToken,
RegistrationClientURI: c.RegistrationClientURI,
ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
}
return json.Marshal(&e)
}
func secToUnix(sec int64) time.Time {
if sec == 0 {
return time.Time{}
}
return time.Unix(sec, 0)
}
func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
var e encodableClientRegistrationResponse
if err := json.Unmarshal(data, &e); err != nil {
return err
}
if e.ClientID == "" {
return errors.New("no client_id in client registration response")
}
metadata, err := e.encodableClientMetadata.toStruct()
if err != nil {
return err
}
*c = ClientRegistrationResponse{
ClientID: e.ClientID,
ClientSecret: e.ClientSecret,
RegistrationAccessToken: e.RegistrationAccessToken,
RegistrationClientURI: e.RegistrationClientURI,
ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
ClientMetadata: metadata,
}
return nil
}
type ClientConfig struct {
HTTPClient phttp.Client
Credentials ClientCredentials
Scope []string
RedirectURL string
ProviderConfig ProviderConfig
KeySet key.PublicKeySet
}
func NewClient(cfg ClientConfig) (*Client, error) {
// Allow empty redirect URL in the case where the client
// only needs to verify a given token.
ru, err := url.Parse(cfg.RedirectURL)
if err != nil {
return nil, fmt.Errorf("invalid redirect URL: %v", err)
}
c := Client{
credentials: cfg.Credentials,
httpClient: cfg.HTTPClient,
scope: cfg.Scope,
redirectURL: ru.String(),
providerConfig: newProviderConfigRepo(cfg.ProviderConfig),
keySet: cfg.KeySet,
}
if c.httpClient == nil {
c.httpClient = http.DefaultClient
}
if c.scope == nil {
c.scope = make([]string, len(DefaultScope))
copy(c.scope, DefaultScope)
}
return &c, nil
}
type Client struct {
httpClient phttp.Client
providerConfig *providerConfigRepo
credentials ClientCredentials
redirectURL string
scope []string
keySet key.PublicKeySet
providerSyncer *ProviderConfigSyncer
keySetSyncMutex sync.RWMutex
lastKeySetSync time.Time
}
func (c *Client) Healthy() error {
now := time.Now().UTC()
cfg := c.providerConfig.Get()
if cfg.Empty() {
return errors.New("oidc client provider config empty")
}
if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) {
return errors.New("oidc client provider config expired")
}
return nil
}
func (c *Client) OAuthClient() (*oauth2.Client, error) {
cfg := c.providerConfig.Get()
authMethod, err := chooseAuthMethod(cfg)
if err != nil {
return nil, err
}
ocfg := oauth2.Config{
Credentials: oauth2.ClientCredentials(c.credentials),
RedirectURL: c.redirectURL,
AuthURL: cfg.AuthEndpoint.String(),
TokenURL: cfg.TokenEndpoint.String(),
Scope: c.scope,
AuthMethod: authMethod,
}
return oauth2.NewClient(c.httpClient, ocfg)
}
func chooseAuthMethod(cfg ProviderConfig) (string, error) {
if len(cfg.TokenEndpointAuthMethodsSupported) == 0 {
return oauth2.AuthMethodClientSecretBasic, nil
}
for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported {
if _, ok := supportedAuthMethods[authMethod]; ok {
return authMethod, nil
}
}
return "", errors.New("no supported auth methods")
}
// SyncProviderConfig starts the provider config syncer
func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
s := NewProviderConfigSyncer(r, c.providerConfig)
stop := s.Run()
s.WaitUntilInitialSync()
return stop
}
func (c *Client) maybeSyncKeys() error {
tooSoon := func() bool {
return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow))
}
// ignore request to sync keys if a sync operation has been
// attempted too recently
if tooSoon() {
return nil
}
c.keySetSyncMutex.Lock()
defer c.keySetSyncMutex.Unlock()
// check again, as another goroutine may have been holding
// the lock while updating the keys
if tooSoon() {
return nil
}
cfg := c.providerConfig.Get()
r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
w := &clientKeyRepo{client: c}
_, err := key.Sync(r, w)
c.lastKeySetSync = time.Now().UTC()
return err
}
type clientKeyRepo struct {
client *Client
}
func (r *clientKeyRepo) Set(ks key.KeySet) error {
pks, ok := ks.(*key.PublicKeySet)
if !ok {
return errors.New("unable to cast to PublicKey")
}
r.client.keySet = *pks
return nil
}
func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) {
cfg := c.providerConfig.Get()
if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) {
return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds)
}
oac, err := c.OAuthClient()
if err != nil {
return jose.JWT{}, err
}
t, err := oac.ClientCredsToken(scope)
if err != nil {
return jose.JWT{}, err
}
jwt, err := jose.ParseJWT(t.IDToken)
if err != nil {
return jose.JWT{}, err
}
return jwt, c.VerifyJWT(jwt)
}
// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
oac, err := c.OAuthClient()
if err != nil {
return jose.JWT{}, err
}
t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
if err != nil {
return jose.JWT{}, err
}
jwt, err := jose.ParseJWT(t.IDToken)
if err != nil {
return jose.JWT{}, err
}
return jwt, c.VerifyJWT(jwt)
}
// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
oac, err := c.OAuthClient()
if err != nil {
return jose.JWT{}, err
}
t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
if err != nil {
return jose.JWT{}, err
}
jwt, err := jose.ParseJWT(t.IDToken)
if err != nil {
return jose.JWT{}, err
}
return jwt, c.VerifyJWT(jwt)
}
func (c *Client) VerifyJWT(jwt jose.JWT) error {
var keysFunc func() []key.PublicKey
if kID, ok := jwt.KeyID(); ok {
keysFunc = c.keysFuncWithID(kID)
} else {
keysFunc = c.keysFuncAll()
}
v := NewJWTVerifier(
c.providerConfig.Get().Issuer.String(),
c.credentials.ID,
c.maybeSyncKeys, keysFunc)
return v.Verify(jwt)
}
// keysFuncWithID returns a function that retrieves at most one unexpired
// public key from the Client that matches the provided ID.
func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
return func() []key.PublicKey {
c.keySetSyncMutex.RLock()
defer c.keySetSyncMutex.RUnlock()
if c.keySet.ExpiresAt().Before(time.Now()) {
return []key.PublicKey{}
}
k := c.keySet.Key(kID)
if k == nil {
return []key.PublicKey{}
}
return []key.PublicKey{*k}
}
}
// keysFuncAll returns a function that retrieves all unexpired public
// keys from the Client
func (c *Client) keysFuncAll() func() []key.PublicKey {
return func() []key.PublicKey {
c.keySetSyncMutex.RLock()
defer c.keySetSyncMutex.RUnlock()
if c.keySet.ExpiresAt().Before(time.Now()) {
return []key.PublicKey{}
}
return c.keySet.Keys()
}
}
type providerConfigRepo struct {
mu sync.RWMutex
config ProviderConfig // do not access directly, use Get()
}
func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
return &providerConfigRepo{sync.RWMutex{}, pc}
}
// returns an error to implement ProviderConfigSetter
func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
r.mu.Lock()
defer r.mu.Unlock()
r.config = cfg
return nil
}
func (r *providerConfigRepo) Get() ProviderConfig {
r.mu.RLock()
defer r.mu.RUnlock()
return r.config
}
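// A minimal sketch of wiring up the OIDC client above against a provider's
// discovery document and exchanging an authorization code for a verified ID
// token; the issuer URL, credentials, and code are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-oidc/oidc"
)

func main() {
	client, err := oidc.NewClient(oidc.ClientConfig{
		Credentials: oidc.ClientCredentials{ID: "example-client", Secret: "example-secret"},
		RedirectURL: "https://app.example.com/callback",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Fetch the provider metadata from the discovery document and keep it in sync.
	stop := client.SyncProviderConfig("https://provider.example.com")
	defer close(stop)

	jwt, err := client.ExchangeAuthCode("authorization-code-from-callback")
	if err != nil {
		log.Fatal(err)
	}
	claims, _ := jwt.Claims()
	fmt.Println(claims)
}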

View File

@ -0,0 +1,44 @@
package oidc
import (
"errors"
"time"
"github.com/coreos/go-oidc/jose"
)
type Identity struct {
ID string
Name string
Email string
ExpiresAt time.Time
}
func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
if claims == nil {
return nil, errors.New("nil claim set")
}
var ident Identity
var err error
var ok bool
if ident.ID, ok, err = claims.StringClaim("sub"); err != nil {
return nil, err
} else if !ok {
return nil, errors.New("missing required claim: sub")
}
if ident.Email, _, err = claims.StringClaim("email"); err != nil {
return nil, err
}
exp, ok, err := claims.TimeClaim("exp")
if err != nil {
return nil, err
} else if ok {
ident.ExpiresAt = exp
}
return &ident, nil
}
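// A minimal sketch of extracting an Identity from a verified ID token's claims,
// again assuming jose.Claims is a plain JSON-object map; values are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/oidc"
)

func main() {
	claims := jose.Claims{
		"sub":   "248289761001",
		"email": "jane@example.com",
		"exp":   float64(1700000000),
	}
	ident, err := oidc.IdentityFromClaims(claims)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ident.ID, ident.Email, ident.ExpiresAt.UTC())
}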

View File

@ -0,0 +1,3 @@
package oidc
type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error)

View File

@ -0,0 +1,67 @@
package oidc
import (
"encoding/json"
"errors"
"net/http"
"time"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/key"
)
// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
// Cache-Control header is provided by the JWK Set document endpoint.
const DefaultPublicKeySetTTL = 24 * time.Hour
// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document.
func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
return &remotePublicKeyRepo{hc: hc, ep: ep}
}
type remotePublicKeyRepo struct {
hc phttp.Client
ep string
}
// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
// is set on the Key Set to avoid it having to be re-retrieved for every
// signature verification. This TTL is typically controlled by the endpoint returning
// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
// is found.
func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
req, err := http.NewRequest("GET", r.ep, nil)
if err != nil {
return nil, err
}
resp, err := r.hc.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var d struct {
Keys []jose.JWK `json:"keys"`
}
if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
return nil, err
}
if len(d.Keys) == 0 {
return nil, errors.New("zero keys in response")
}
ttl, ok, err := phttp.Cacheable(resp.Header)
if err != nil {
return nil, err
}
if !ok {
ttl = DefaultPublicKeySetTTL
}
exp := time.Now().UTC().Add(ttl)
ks := key.NewPublicKeySet(d.Keys, exp)
return ks, nil
}
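// A minimal sketch of fetching a provider's published JWK Set with the repo
// above; the endpoint URL is a placeholder.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/coreos/go-oidc/oidc"
)

func main() {
	repo := oidc.NewRemotePublicKeyRepo(http.DefaultClient, "https://provider.example.com/keys")
	ks, err := repo.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched key set, expires at:", ks.ExpiresAt())
}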

View File

@ -0,0 +1,690 @@
package oidc
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/coreos/pkg/timeutil"
"github.com/jonboulle/clockwork"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/oauth2"
)
const (
// Subject Identifier types defined by the OIDC spec. Specifies if the provider
// should provide the same sub claim value to all clients (public) or a unique
// value for each client (pairwise).
//
// See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
SubjectTypePublic = "public"
SubjectTypePairwise = "pairwise"
)
var (
// Default values for omitted provider config fields.
//
// Use ProviderConfig's Defaults method to fill a provider config with these values.
DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
DefaultResponseModesSupported = []string{"query", "fragment"}
DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
DefaultClaimTypesSupported = []string{"normal"}
)
const (
MaximumProviderConfigSyncInterval = 24 * time.Hour
MinimumProviderConfigSyncInterval = time.Minute
discoveryConfigPath = "/.well-known/openid-configuration"
)
// internally configurable for tests
var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
var (
// Ensure ProviderConfig satisfies these interfaces.
_ json.Marshaler = &ProviderConfig{}
_ json.Unmarshaler = &ProviderConfig{}
)
// ProviderConfig represents the OpenID Provider Metadata specifying what
// configurations a provider supports.
//
// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
type ProviderConfig struct {
Issuer *url.URL // Required
AuthEndpoint *url.URL // Required
TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
UserInfoEndpoint *url.URL
KeysEndpoint *url.URL // Required
RegistrationEndpoint *url.URL
EndSessionEndpoint *url.URL
CheckSessionIFrame *url.URL
// Servers MAY choose not to advertise some supported scope values even when this
// parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
ScopesSupported []string
// OAuth2.0 response types supported.
ResponseTypesSupported []string // Required
// OAuth2.0 response modes supported.
//
// If omitted, defaults to DefaultResponseModesSupported.
ResponseModesSupported []string
// OAuth2.0 grant types supported.
//
// If omitted, defaults to DefaultGrantTypesSupported.
GrantTypesSupported []string
ACRValuesSupported []string
// SubjectTypesSupported specifies strategies for providing values for the sub claim.
SubjectTypesSupported []string // Required
// JWA signing and encryption algorithm values supported for ID tokens.
IDTokenSigningAlgValues []string // Required
IDTokenEncryptionAlgValues []string
IDTokenEncryptionEncValues []string
// JWA signing and encryption algorithm values supported for user info responses.
UserInfoSigningAlgValues []string
UserInfoEncryptionAlgValues []string
UserInfoEncryptionEncValues []string
// JWA signing and encryption algorithm values supported for request objects.
ReqObjSigningAlgValues []string
ReqObjEncryptionAlgValues []string
ReqObjEncryptionEncValues []string
TokenEndpointAuthMethodsSupported []string
TokenEndpointAuthSigningAlgValuesSupported []string
DisplayValuesSupported []string
ClaimTypesSupported []string
ClaimsSupported []string
ServiceDocs *url.URL
ClaimsLocalsSupported []string
UILocalsSupported []string
ClaimsParameterSupported bool
RequestParameterSupported bool
RequestURIParamaterSupported bool
RequireRequestURIRegistration bool
Policy *url.URL
TermsOfService *url.URL
// Not part of the OpenID Provider Metadata
ExpiresAt time.Time
}
// Defaults returns a shallow copy of ProviderConfig with default
// values replacing omitted fields.
//
// var cfg oidc.ProviderConfig
// // Fill provider config with default values for omitted fields.
// cfg = cfg.Defaults()
//
func (p ProviderConfig) Defaults() ProviderConfig {
setDefault := func(val *[]string, defaultVal []string) {
if len(*val) == 0 {
*val = defaultVal
}
}
setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
return p
}
func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
e := p.toEncodableStruct()
return json.Marshal(&e)
}
func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
var e encodableProviderConfig
if err := json.Unmarshal(data, &e); err != nil {
return err
}
conf, err := e.toStruct()
if err != nil {
return err
}
if err := conf.Valid(); err != nil {
return err
}
*p = conf
return nil
}
type encodableProviderConfig struct {
Issuer string `json:"issuer"`
AuthEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
KeysEndpoint string `json:"jwks_uri"`
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
EndSessionEndpoint string `json:"end_session_endpoint,omitempty"`
CheckSessionIFrame string `json:"check_session_iframe,omitempty"`
// Use 'omitempty' for all slices as per OIDC spec:
// "Claims that return multiple values are represented as JSON arrays.
// Claims with zero elements MUST be omitted from the response."
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
ScopesSupported []string `json:"scopes_supported,omitempty"`
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
ClaimsSupported []string `json:"claims_supported,omitempty"`
ServiceDocs string `json:"service_documentation,omitempty"`
ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
Policy string `json:"op_policy_uri,omitempty"`
TermsOfService string `json:"op_tos_uri,omitempty"`
}
func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
return encodableProviderConfig{
Issuer: uriToString(cfg.Issuer),
AuthEndpoint: uriToString(cfg.AuthEndpoint),
TokenEndpoint: uriToString(cfg.TokenEndpoint),
UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
KeysEndpoint: uriToString(cfg.KeysEndpoint),
RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
EndSessionEndpoint: uriToString(cfg.EndSessionEndpoint),
CheckSessionIFrame: uriToString(cfg.CheckSessionIFrame),
ScopesSupported: cfg.ScopesSupported,
ResponseTypesSupported: cfg.ResponseTypesSupported,
ResponseModesSupported: cfg.ResponseModesSupported,
GrantTypesSupported: cfg.GrantTypesSupported,
ACRValuesSupported: cfg.ACRValuesSupported,
SubjectTypesSupported: cfg.SubjectTypesSupported,
IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
DisplayValuesSupported: cfg.DisplayValuesSupported,
ClaimTypesSupported: cfg.ClaimTypesSupported,
ClaimsSupported: cfg.ClaimsSupported,
ServiceDocs: uriToString(cfg.ServiceDocs),
ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
UILocalsSupported: cfg.UILocalsSupported,
ClaimsParameterSupported: cfg.ClaimsParameterSupported,
RequestParameterSupported: cfg.RequestParameterSupported,
RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
Policy: uriToString(cfg.Policy),
TermsOfService: uriToString(cfg.TermsOfService),
}
}
func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
p := stickyErrParser{}
conf := ProviderConfig{
Issuer: p.parseURI(e.Issuer, "issuer"),
AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
EndSessionEndpoint: p.parseURI(e.EndSessionEndpoint, "end_session_endpoint"),
CheckSessionIFrame: p.parseURI(e.CheckSessionIFrame, "check_session_iframe"),
ScopesSupported: e.ScopesSupported,
ResponseTypesSupported: e.ResponseTypesSupported,
ResponseModesSupported: e.ResponseModesSupported,
GrantTypesSupported: e.GrantTypesSupported,
ACRValuesSupported: e.ACRValuesSupported,
SubjectTypesSupported: e.SubjectTypesSupported,
IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
DisplayValuesSupported: e.DisplayValuesSupported,
ClaimTypesSupported: e.ClaimTypesSupported,
ClaimsSupported: e.ClaimsSupported,
ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
ClaimsLocalsSupported: e.ClaimsLocalsSupported,
UILocalsSupported: e.UILocalsSupported,
ClaimsParameterSupported: e.ClaimsParameterSupported,
RequestParameterSupported: e.RequestParameterSupported,
RequestURIParamaterSupported: e.RequestURIParamaterSupported,
RequireRequestURIRegistration: e.RequireRequestURIRegistration,
Policy: p.parseURI(e.Policy, "op_policy_uri"),
TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
}
if p.firstErr != nil {
return ProviderConfig{}, p.firstErr
}
return conf, nil
}
// Empty returns true if a ProviderConfig holds no information.
//
// This case generally indicates a ProviderConfigGetter has experienced an error
// and has nothing to report.
func (p ProviderConfig) Empty() bool {
return p.Issuer == nil
}
func contains(sli []string, ele string) bool {
for _, s := range sli {
if s == ele {
return true
}
}
return false
}
// Valid determines if a ProviderConfig conforms with the OIDC specification.
// If Valid returns successfully it guarantees required fields are non-nil and
// URLs are well formed.
//
// Valid is called by UnmarshalJSON.
//
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
// URLs fields where the OIDC spec requires it. This may change in future releases
// of this package. See: https://github.com/coreos/go-oidc/issues/34
func (p ProviderConfig) Valid() error {
grantTypes := p.GrantTypesSupported
if len(grantTypes) == 0 {
grantTypes = DefaultGrantTypesSupported
}
implicitOnly := true
for _, grantType := range grantTypes {
if grantType != oauth2.GrantTypeImplicit {
implicitOnly = false
break
}
}
if len(p.SubjectTypesSupported) == 0 {
return errors.New("missing required field subject_types_supported")
}
if len(p.IDTokenSigningAlgValues) == 0 {
return errors.New("missing required field id_token_signing_alg_values_supported")
}
if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
return errors.New("scoped_supported must be unspecified or include 'openid'")
}
if !contains(p.IDTokenSigningAlgValues, "RS256") {
return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
}
if contains(p.TokenEndpointAuthMethodsSupported, "none") {
return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'")
}
uris := []struct {
val *url.URL
name string
required bool
}{
{p.Issuer, "issuer", true},
{p.AuthEndpoint, "authorization_endpoint", true},
{p.TokenEndpoint, "token_endpoint", !implicitOnly},
{p.UserInfoEndpoint, "userinfo_endpoint", false},
{p.KeysEndpoint, "jwks_uri", true},
{p.RegistrationEndpoint, "registration_endpoint", false},
{p.EndSessionEndpoint, "end_session_endpoint", false},
{p.CheckSessionIFrame, "check_session_iframe", false},
{p.ServiceDocs, "service_documentation", false},
{p.Policy, "op_policy_uri", false},
{p.TermsOfService, "op_tos_uri", false},
}
for _, uri := range uris {
if uri.val == nil {
if !uri.required {
continue
}
return fmt.Errorf("empty value for required uri field %s", uri.name)
}
if uri.val.Host == "" {
return fmt.Errorf("no host for uri field %s", uri.name)
}
if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
return fmt.Errorf("uri field %s schemeis not http or https", uri.name)
}
}
return nil
}
// Supports determines if provider supports a client given their respective metadata.
func (p ProviderConfig) Supports(c ClientMetadata) error {
if err := p.Valid(); err != nil {
return fmt.Errorf("invalid provider config: %v", err)
}
if err := c.Valid(); err != nil {
return fmt.Errorf("invalid client config: %v", err)
}
// Fill default values for omitted fields
c = c.Defaults()
p = p.Defaults()
// Do the supported values list the requested one?
supports := []struct {
supported []string
requested string
name string
}{
{p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
{p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
{p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
{p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
{p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
{p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
{p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
{p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
{p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
}
for _, field := range supports {
if field.requested == "" {
continue
}
if !contains(field.supported, field.requested) {
return fmt.Errorf("provider does not support requested value for field %s", field.name)
}
}
stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
// For lists, are the list of requested values a subset of the supported ones?
supportsAll := []struct {
supported []string
requested []string
name string
// OAuth2.0 response_type values can be space-separated lists where order doesn't matter.
// For example, "id_token token" is the same as "token id_token", so
// support a custom compare method.
comp func(s1, s2 string) bool
}{
{p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
{p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
}
for _, field := range supportsAll {
requestLoop:
for _, req := range field.requested {
for _, sup := range field.supported {
if field.comp(req, sup) {
continue requestLoop
}
}
return fmt.Errorf("provider does not support requested value for field %s", field.name)
}
}
// TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
return nil
}
func (p ProviderConfig) SupportsGrantType(grantType string) bool {
var supported []string
if len(p.GrantTypesSupported) == 0 {
supported = DefaultGrantTypesSupported
} else {
supported = p.GrantTypesSupported
}
for _, t := range supported {
if t == grantType {
return true
}
}
return false
}
type ProviderConfigGetter interface {
Get() (ProviderConfig, error)
}
type ProviderConfigSetter interface {
Set(ProviderConfig) error
}
type ProviderConfigSyncer struct {
from ProviderConfigGetter
to ProviderConfigSetter
clock clockwork.Clock
initialSyncDone bool
initialSyncWait sync.WaitGroup
}
func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
return &ProviderConfigSyncer{
from: from,
to: to,
clock: clockwork.NewRealClock(),
}
}
func (s *ProviderConfigSyncer) Run() chan struct{} {
stop := make(chan struct{})
var next pcsStepper
next = &pcsStepNext{aft: time.Duration(0)}
s.initialSyncWait.Add(1)
go func() {
for {
select {
case <-s.clock.After(next.after()):
next = next.step(s.sync)
case <-stop:
return
}
}
}()
return stop
}
func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
s.initialSyncWait.Wait()
}
func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
cfg, err := s.from.Get()
if err != nil {
return 0, err
}
if err = s.to.Set(cfg); err != nil {
return 0, fmt.Errorf("error setting provider config: %v", err)
}
if !s.initialSyncDone {
s.initialSyncWait.Done()
s.initialSyncDone = true
}
return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
}
type pcsStepFunc func() (time.Duration, error)
type pcsStepper interface {
after() time.Duration
step(pcsStepFunc) pcsStepper
}
type pcsStepNext struct {
aft time.Duration
}
func (n *pcsStepNext) after() time.Duration {
return n.aft
}
func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
ttl, err := fn()
if err == nil {
next = &pcsStepNext{aft: ttl}
} else {
next = &pcsStepRetry{aft: time.Second}
log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err)
}
return
}
type pcsStepRetry struct {
aft time.Duration
}
func (r *pcsStepRetry) after() time.Duration {
return r.aft
}
func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
ttl, err := fn()
if err == nil {
next = &pcsStepNext{aft: ttl}
} else {
next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err)
}
return
}
func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration {
if exp.IsZero() {
return MaximumProviderConfigSyncInterval
}
t := exp.Sub(clock.Now()) / 2
if t > MaximumProviderConfigSyncInterval {
t = MaximumProviderConfigSyncInterval
} else if t < minimumProviderConfigSyncInterval {
t = minimumProviderConfigSyncInterval
}
return t
}
type httpProviderConfigGetter struct {
hc phttp.Client
issuerURL string
clock clockwork.Clock
}
func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
return &httpProviderConfigGetter{
hc: hc,
issuerURL: issuerURL,
clock: clockwork.NewRealClock(),
}
}
func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
// If the Issuer value contains a path component, any terminating / MUST be removed before
// appending /.well-known/openid-configuration.
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
req, err := http.NewRequest("GET", discoveryURL, nil)
if err != nil {
return
}
resp, err := r.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
return
}
var ttl time.Duration
var ok bool
ttl, ok, err = phttp.Cacheable(resp.Header)
if err != nil {
return
} else if ok {
cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl)
}
// The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
return
}
return
}
func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) {
if hc == nil {
hc = http.DefaultClient
}
g := NewHTTPProviderConfigGetter(hc, issuerURL)
return g.Get()
}
func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) {
return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock())
}
func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) {
var sleep time.Duration
var err error
for {
pcfg, err = FetchProviderConfig(hc, issuerURL)
if err == nil {
break
}
sleep = timeutil.ExpBackoff(sleep, time.Minute)
fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err)
time.Sleep(sleep)
}
return
}
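
A minimal sketch of how the discovery helpers above might be used (not part of the vendored file): fetch a provider's configuration, apply defaults, and query supported grant types. The issuer URL is hypothetical; the import paths follow the upstream go-oidc layout.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-oidc/oauth2"
	"github.com/coreos/go-oidc/oidc"
)

func main() {
	// Passing a nil client makes FetchProviderConfig fall back to http.DefaultClient.
	cfg, err := oidc.FetchProviderConfig(nil, "https://accounts.example.com")
	if err != nil {
		log.Fatalf("discovery failed: %v", err)
	}
	cfg = cfg.Defaults() // fill omitted fields such as grant_types_supported
	fmt.Println("issuer:", cfg.Issuer)
	fmt.Println("supports auth code grant:", cfg.SupportsGrantType(oauth2.GrantTypeAuthCode))
}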

View File

@ -0,0 +1,88 @@
package oidc
import (
"fmt"
"net/http"
"sync"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
)
type TokenRefresher interface {
// Verify checks if the provided token is currently valid or not.
Verify(jose.JWT) error
// Refresh attempts to authenticate and retrieve a new token.
Refresh() (jose.JWT, error)
}
type ClientCredsTokenRefresher struct {
Issuer string
OIDCClient *Client
}
func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) {
_, err = VerifyClientClaims(jwt, c.Issuer)
return
}
func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) {
if err = c.OIDCClient.Healthy(); err != nil {
err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err)
return
}
jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"})
if err != nil {
err = fmt.Errorf("unable to verify auth code with issuer: %v", err)
return
}
return
}
type AuthenticatedTransport struct {
TokenRefresher
http.RoundTripper
mu sync.Mutex
jwt jose.JWT
}
func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
t.mu.Lock()
defer t.mu.Unlock()
if t.TokenRefresher.Verify(t.jwt) == nil {
return t.jwt, nil
}
jwt, err := t.TokenRefresher.Refresh()
if err != nil {
return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err)
}
t.jwt = jwt
return t.jwt, nil
}
// SetJWT sets the JWT held by the Transport.
// This is useful for cases in which you want to set an initial JWT.
func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
t.mu.Lock()
defer t.mu.Unlock()
t.jwt = jwt
}
func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
jwt, err := t.verifiedJWT()
if err != nil {
return nil, err
}
req := phttp.CopyRequest(r)
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode()))
return t.RoundTripper.RoundTrip(req)
}
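
A minimal sketch (not part of the vendored file) of wiring AuthenticatedTransport into an http.Client. staticRefresher is a hypothetical TokenRefresher that serves a pre-obtained JWT; a real deployment would use ClientCredsTokenRefresher or a similar implementation.

package main

import (
	"errors"
	"net/http"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/oidc"
)

// staticRefresher is a hypothetical TokenRefresher that hands out a pre-obtained JWT.
type staticRefresher struct{ token jose.JWT }

// Verify treats only the refresher's own token as valid, so the transport
// refreshes on first use and then reuses the token.
func (s staticRefresher) Verify(jwt jose.JWT) error {
	if jwt.Encode() != s.token.Encode() {
		return errors.New("token needs refresh")
	}
	return nil
}

func (s staticRefresher) Refresh() (jose.JWT, error) { return s.token, nil }

// newAuthenticatedClient returns an http.Client whose requests carry the
// refresher's token as a Bearer Authorization header.
func newAuthenticatedClient(token jose.JWT) *http.Client {
	return &http.Client{
		Transport: &oidc.AuthenticatedTransport{
			TokenRefresher: staticRefresher{token: token},
			RoundTripper:   http.DefaultTransport,
		},
	}
}

func main() {
	_ = newAuthenticatedClient(jose.JWT{}) // construct only; no request is made here
}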

View File

@ -0,0 +1,109 @@
package oidc
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/coreos/go-oidc/jose"
)
// RequestTokenExtractor funcs extract a raw encoded token from a request.
type RequestTokenExtractor func(r *http.Request) (string, error)
// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's
// Authorization header.
func ExtractBearerToken(r *http.Request) (string, error) {
ah := r.Header.Get("Authorization")
if ah == "" {
return "", errors.New("missing Authorization header")
}
if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" {
return "", errors.New("should be a bearer token")
}
val := ah[7:]
if len(val) == 0 {
return "", errors.New("bearer token is empty")
}
return val, nil
}
// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request.
func CookieTokenExtractor(cookieName string) RequestTokenExtractor {
return func(r *http.Request) (string, error) {
ck, err := r.Cookie(cookieName)
if err != nil {
return "", fmt.Errorf("token cookie not found in request: %v", err)
}
if ck.Value == "" {
return "", errors.New("token cookie found but is empty")
}
return ck.Value, nil
}
}
func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims {
return jose.Claims{
// required
"iss": iss,
"sub": sub,
"aud": aud,
"iat": iat.Unix(),
"exp": exp.Unix(),
}
}
func GenClientID(hostport string) (string, error) {
b, err := randBytes(32)
if err != nil {
return "", err
}
var host string
if strings.Contains(hostport, ":") {
host, _, err = net.SplitHostPort(hostport)
if err != nil {
return "", err
}
} else {
host = hostport
}
return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil
}
func randBytes(n int) ([]byte, error) {
b := make([]byte, n)
got, err := rand.Read(b)
if err != nil {
return nil, err
} else if n != got {
return nil, errors.New("unable to generate enough random data")
}
return b, nil
}
// urlEqual checks two urls for equality using only the host and path portions.
func urlEqual(url1, url2 string) bool {
u1, err := url.Parse(url1)
if err != nil {
return false
}
u2, err := url.Parse(url2)
if err != nil {
return false
}
return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path)
}
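
A minimal sketch (not part of the vendored file) combining the extractors above in an HTTP handler: try the Authorization header first, then fall back to a cookie. The route, listen address, and cookie name are hypothetical.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/coreos/go-oidc/oidc"
)

func tokenHandler(w http.ResponseWriter, r *http.Request) {
	raw, err := oidc.ExtractBearerToken(r)
	if err != nil {
		// No bearer token; fall back to a (hypothetical) session cookie.
		raw, err = oidc.CookieTokenExtractor("session_token")(r)
	}
	if err != nil {
		http.Error(w, "no token provided", http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(w, "received a token of %d bytes\n", len(raw))
}

func main() {
	http.HandleFunc("/whoami", tokenHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}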

View File

@ -0,0 +1,190 @@
package oidc
import (
"errors"
"fmt"
"time"
"github.com/jonboulle/clockwork"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/key"
)
func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) {
jwtBytes := []byte(jwt.Data())
for _, k := range keys {
v, err := k.Verifier()
if err != nil {
return false, err
}
if v.Verify(jwt.Signature, jwtBytes) == nil {
return true, nil
}
}
return false, nil
}
// containsString returns true if the given string (needle) is found
// in the string slice (haystack).
func containsString(needle string, haystack []string) bool {
for _, v := range haystack {
if v == needle {
return true
}
}
return false
}
// Verify claims in accordance with OIDC spec
// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation
func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {
now := time.Now().UTC()
claims, err := jwt.Claims()
if err != nil {
return err
}
ident, err := IdentityFromClaims(claims)
if err != nil {
return err
}
if ident.ExpiresAt.Before(now) {
return errors.New("token is expired")
}
// iss REQUIRED. Issuer Identifier for the Issuer of the response.
// The iss value is a case sensitive URL using the https scheme that contains scheme,
// host, and optionally, port number and path components and no query or fragment components.
if iss, exists := claims["iss"].(string); exists {
if !urlEqual(iss, issuer) {
return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss)
}
} else {
return errors.New("missing claim: 'iss'")
}
// iat REQUIRED. Time at which the JWT was issued.
// Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
// as measured in UTC until the date/time.
if _, exists := claims["iat"].(float64); !exists {
return errors.New("missing claim: 'iat'")
}
// aud REQUIRED. Audience(s) that this ID Token is intended for.
// It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
// It MAY also contain identifiers for other audiences. In the general case, the aud
// value is an array of case sensitive strings. In the common special case when there
// is one audience, the aud value MAY be a single case sensitive string.
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
if aud != clientID {
return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
}
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
if !containsString(clientID, aud) {
return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
}
} else {
return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
}
return nil
}
// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
// Returns the client ID if valid, or an error if invalid.
func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
claims, err := jwt.Claims()
if err != nil {
return "", fmt.Errorf("failed to parse JWT claims: %v", err)
}
iss, ok, err := claims.StringClaim("iss")
if err != nil {
return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
} else if !ok {
return "", errors.New("missing required 'iss' claim")
} else if !urlEqual(iss, issuer) {
return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
}
sub, ok, err := claims.StringClaim("sub")
if err != nil {
return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
} else if !ok {
return "", errors.New("missing required 'sub' claim")
}
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
if aud != sub {
return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
}
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
if !containsString(sub, aud) {
return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub)
}
} else {
return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
}
now := time.Now().UTC()
exp, ok, err := claims.TimeClaim("exp")
if err != nil {
return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
} else if !ok {
return "", errors.New("missing required 'exp' claim")
} else if exp.Before(now) {
return "", fmt.Errorf("token already expired at: %v", exp)
}
return sub, nil
}
type JWTVerifier struct {
issuer string
clientID string
syncFunc func() error
keysFunc func() []key.PublicKey
clock clockwork.Clock
}
func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
return JWTVerifier{
issuer: issuer,
clientID: clientID,
syncFunc: syncFunc,
keysFunc: keysFunc,
clock: clockwork.NewRealClock(),
}
}
func (v *JWTVerifier) Verify(jwt jose.JWT) error {
// Verify claims before verifying the signature. This is an optimization to throw out
// tokens we know are invalid without undergoing an expensive signature check and
// possibly a re-sync event.
if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil {
return fmt.Errorf("oidc: JWT claims invalid: %v", err)
}
ok, err := VerifySignature(jwt, v.keysFunc())
if err != nil {
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
} else if ok {
return nil
}
if err = v.syncFunc(); err != nil {
return fmt.Errorf("oidc: failed syncing KeySet: %v", err)
}
ok, err = VerifySignature(jwt, v.keysFunc())
if err != nil {
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
} else if !ok {
return errors.New("oidc: unable to verify JWT signature: no matching keys")
}
return nil
}
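
A minimal sketch (not part of the vendored file) of constructing a JWTVerifier over a static key set. The issuer and client ID are hypothetical, and the no-op syncFunc stands in for re-fetching the provider's keys.

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/key"
	"github.com/coreos/go-oidc/oidc"
)

// verifyIDToken checks the token's claims and signature against a static key set.
func verifyIDToken(jwt jose.JWT, keys []key.PublicKey) error {
	verifier := oidc.NewJWTVerifier(
		"https://accounts.example.com",         // hypothetical expected issuer
		"example-client-id",                    // hypothetical expected audience
		func() error { return nil },            // no-op sync: nothing to re-fetch for a static set
		func() []key.PublicKey { return keys }, // key source
	)
	return verifier.Verify(jwt)
}

func main() {
	// An empty token fails the claim checks, illustrating the error path.
	fmt.Println(verifyIDToken(jose.JWT{}, nil))
}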

View File

@ -0,0 +1,11 @@
health
====
A simple framework for implementing an HTTP health check endpoint on servers.
Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`.
### Documentation
For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)

View File

@ -0,0 +1,127 @@
package health
import (
"expvar"
"fmt"
"log"
"net/http"
"github.com/coreos/pkg/httputil"
)
// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
type Checkable interface {
Healthy() error
}
// Checker provides a way to make an endpoint which can be probed for system health.
type Checker struct {
// Checks are the Checkables to be checked when probing.
Checks []Checkable
// UnhealthyHandler is called when one or more of the checks are unhealthy.
// If not provided DefaultUnhealthyHandler is called.
UnhealthyHandler UnhealthyHandler
// HealthyHandler is called when all checks are healthy.
// If not provided, DefaultHealthyHandler is called.
HealthyHandler http.HandlerFunc
}
func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
unhealthyHandler := c.UnhealthyHandler
if unhealthyHandler == nil {
unhealthyHandler = DefaultUnhealthyHandler
}
successHandler := c.HealthyHandler
if successHandler == nil {
successHandler = DefaultHealthyHandler
}
if r.Method != "GET" {
w.Header().Set("Allow", "GET")
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
if err := Check(c.Checks); err != nil {
unhealthyHandler(w, r, err)
return
}
successHandler(w, r)
}
type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
type StatusResponse struct {
Status string `json:"status"`
Details *StatusResponseDetails `json:"details,omitempty"`
}
type StatusResponseDetails struct {
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func Check(checks []Checkable) (err error) {
errs := []error{}
for _, c := range checks {
if e := c.Healthy(); e != nil {
errs = append(errs, e)
}
}
switch len(errs) {
case 0:
err = nil
case 1:
err = errs[0]
default:
err = fmt.Errorf("multiple health check failure: %v", errs)
}
return
}
func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
Status: "ok",
})
if err != nil {
// TODO(bobbyrullo): replace with logging from new logging pkg,
// once it lands.
log.Printf("Failed to write JSON response: %v", err)
}
}
func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
Status: "error",
Details: &StatusResponseDetails{
Code: http.StatusInternalServerError,
Message: err.Error(),
},
})
if writeErr != nil {
// TODO(bobbyrullo): replace with logging from new logging pkg,
// once it lands.
log.Printf("Failed to write JSON response: %v", err)
}
}
// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
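
A minimal sketch (not part of the vendored package) of implementing Checkable and serving the Checker over HTTP. The upstream URL and listen address are hypothetical.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/coreos/pkg/health"
)

// upstreamCheck reports healthy when a (hypothetical) dependency answers an HTTP GET.
type upstreamCheck struct{ url string }

func (c upstreamCheck) Healthy() error {
	resp, err := http.Get(c.url)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("upstream returned %s", resp.Status)
	}
	return nil
}

func main() {
	checker := health.Checker{
		Checks: []health.Checkable{upstreamCheck{url: "http://localhost:9000/ready"}},
	}
	// Checker implements http.Handler via ServeHTTP, so it can be mounted directly.
	http.Handle("/health", checker)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}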

View File

@ -0,0 +1,13 @@
httputil
====
Common code for dealing with HTTP.
Includes:
* Code for returning JSON responses.
### Documentation
Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil)

View File

@ -0,0 +1,21 @@
package httputil
import (
"net/http"
"time"
)
// DeleteCookies effectively deletes all named cookies
// by wiping all data and setting to expire immediately.
func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
for _, n := range cookieNames {
c := &http.Cookie{
Name: n,
Value: "",
Path: "/",
MaxAge: -1,
Expires: time.Time{},
}
http.SetCookie(w, c)
}
}
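
A minimal sketch (not part of the vendored file) of clearing session cookies on logout; the cookie names, routes, and listen address are hypothetical.

package main

import (
	"log"
	"net/http"

	"github.com/coreos/pkg/httputil"
)

func logoutHandler(w http.ResponseWriter, r *http.Request) {
	// Expire both cookies immediately, then send the user back to the login page.
	httputil.DeleteCookies(w, "session_token", "remember_me")
	http.Redirect(w, r, "/login", http.StatusFound)
}

func main() {
	http.HandleFunc("/logout", logoutHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}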

View File

@ -0,0 +1,27 @@
package httputil
import (
"encoding/json"
"net/http"
)
const (
JSONContentType = "application/json"
)
func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
enc, err := json.Marshal(resp)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return err
}
w.Header().Set("Content-Type", JSONContentType)
w.WriteHeader(code)
_, err = w.Write(enc)
if err != nil {
return err
}
return nil
}
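
A minimal sketch (not part of the vendored file) of returning a JSON body with WriteJSONResponse; the payload type, route, and listen address are hypothetical.

package main

import (
	"log"
	"net/http"

	"github.com/coreos/pkg/httputil"
)

type versionResponse struct {
	Version string `json:"version"`
}

func versionHandler(w http.ResponseWriter, r *http.Request) {
	// WriteJSONResponse sets the Content-Type header and status code before the body.
	if err := httputil.WriteJSONResponse(w, http.StatusOK, versionResponse{Version: "1.0.0"}); err != nil {
		log.Printf("failed to write JSON response: %v", err)
	}
}

func main() {
	http.HandleFunc("/version", versionHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}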

Some files were not shown because too many files have changed in this diff