Jaume Devesa 8c353be83b Move arrakis here
Most of the arrakis code moved here, using puppet-cassandra and
puppet-zookeeper as dependencies

Change-Id: I6bdfb80457c500e2c6ea98aeaa9e7c074757c6c7
2015-06-08 17:45:07 +02:00

# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Midolman configuration file

[zookeeper]
<%- zkarr = Array.new -%>
<%- @zk_servers.each do |s| -%>
<%- zkarr.push("#{s['ip']}:#{s['port'] ||= 2181 }") -%>
<%- end -%>
zookeeper_hosts = <%= zkarr.join(",") %>
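# As a hedged illustration of the template above (not part of the upstream
# file): if the Puppet manifest passed @zk_servers = [{'ip' => '192.0.2.10'},
# {'ip' => '192.0.2.11', 'port' => 2182}] -- hypothetical addresses, with
# entries lacking a 'port' falling back to 2181 -- the rendered line would be:
#   zookeeper_hosts = 192.0.2.10:2181,192.0.2.11:2182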
session_timeout = 30000
midolman_root_key = /midonet/v1
session_gracetime = 30000

[cassandra]
# The minimum recommended cassandra setup is a 3-node cluster with a
# replication factor of 3. Midolman uses QUORUM as its consistency level, which
# translates to 2 replicas in the suggested setup.
#
# Refer to the docs/cassandra-cache.md documentation for more specific details
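# Worked example (illustration only, not a setting): with replication_factor = 3,
# QUORUM means floor(3/2) + 1 = 2 replicas must acknowledge each read or write,
# so the suggested 3-node cluster keeps serving requests with one node down.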
servers = <%= @cs_seeds.join(",") %>
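# Sketch of the rendered output, assuming the manifest supplies
# @cs_seeds = ['192.0.2.21', '192.0.2.22', '192.0.2.23'] (placeholder IPs):
#   servers = 192.0.2.21,192.0.2.22,192.0.2.23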
# DO CHANGE THIS, recommended value is 3
replication_factor = 1
cluster = midonet

[bridge]
mac_port_mapping_expire_millis = 15000

[arptable]
arp_retry_interval_seconds = 10
arp_timeout_seconds = 60
arp_stale_seconds = 1800
arp_expiration_seconds = 3600

[midolman]
disconnected_ttl_seconds = 30
control_interface = eth0
cache_type = cassandra
check_flow_expiration_interval = 10000 #millis
# top_level_actor_supervisor = resume
top_level_actor_supervisor = crash
# After requesting an update to the kernel, if a flow with idle expiration set
# has less than idle_flow_tolerance_interval left to live, we expire it
# idle_flow_tolerance_interval = 10000
# bgpd options
# path to directory containing bgpd binary, default is /usr/sbin
#bgpd_binary = /usr/sbin # for RHEL
#bgpd_binary = /usr/lib/quagga/ # for ubuntu
# path to directory containing bgpd.conf configuration file for bgpd
#bgpd_config = /etc/quagga # default value
# number of threads dedicated to packet processing
simulation_threads = 1
# number of datapath output channels
output_channels = 1
# threading model for datapath input channels. There is one channel per port.
# Allowed values are:
# + one_to_many: use one thread to service all ports
# + one_to_one: use one thread to service each port
input_channel_threading = one_to_many
# dashboard, experimental
enable_dashboard=false
jetty_xml=/etc/midolman/jetty/etc/jetty.xml
# location of the exterior vxlan vport uuid to vni key map (as a json object)
#uuid_vni_json_mapping_file=/etc/midolman/uuidtovni.json

[host]
# This file holds the host UUID across reboots. It is created when
# midolman is first executed in the host, by default it will be stored
# in /etc/midolman/
#properties_file = /etc/midolman/host_uuid.properties
wait_time_between_scans = 5000 # 5 * 1000 millis

[datapath]
# This option specifies the value of the udp port used for vxlan tunnelling
# to peer vteps. By default it is set to the standardized vxlan udp port value
# which is 4789.
#vxlan_vtep_udp_port = 4789
# This option specifies the value of the udp port used for vxlan tunnelling
# of overlay traffic from midolman hosts to other midolman hosts. The value
# needs to be the same across the cluster. It also needs to be different from
# the vxlan_vtep_udp_port value.
vxlan_overlay_udp_port = 6677
# Maximum number of flows a given datapath will be able to contain.
max_flow_count = 20000
# Maximum number of wildcard flows a given datapath will be able to contain.
max_wildcard_flow_count = 20000
# Midolman uses a pool of reusable buffers to send requests to the
# datapath. The options below tune the pool's size and that of its
# buffers. One pool is created for each output channel, the settings
# defined here will apply to each of those pools.
# max_size: maximum number of buffers to hold in the pool. When the
# pool is empty (all buffers are in use) and has reached
# its maximum size, temporary buffers will be allocated.
send_buffer_pool_max_size = 2048
# initial_size: initial number of buffers to allocate in the pool
send_buffer_pool_initial_size = 2048
# buf_size_kb: size of each buffer, in kb. Maximum total pool size would thus
# be: max_size * buf_size_kb. Beware that the buffer size puts a
# limit on the packet size that Midolman can send. In a network using
# jumbo frames, adjust the size so that one buffer will accommodate
# a whole frame plus enough room for the flow's actions.
send_buffer_pool_buf_size_kb = 8
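# Rough arithmetic for the defaults above (illustration only): each output
# channel's pool may hold up to 2048 buffers of 8 KB, i.e. 2048 * 8 KB = 16 MB
# of buffer memory per channel, and a single packet cannot exceed roughly 8 KB
# minus the room needed for the flow's actions.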
# How many datapath messages to process in each batch, increasing throughput
# by reducing synchronization costs. Too high a value may hurt latency.
msgs_per_batch = 200
# Midolman limits the amount of packets in flight in the system at any
# given time. This prevents its internal queues from growing infinitely.
# Additionally, midolman ensures that its processing capacity is shared
# fairly among ports connected to the datapath. This, for example,
# would prevent a single VM from setting up new flows at a rate that
# would starve other VMs in the system.
#
# This behaviour is achieved by routing packets that miss the datapath
# flow table and rise to userspace through a Hierarchical Token Bucket.
# This HTB is set up in such a way that tunnel ports will get 50%
# of the resources, and the remaining 50% is shared fairly among all
# other ports (typically, VMs).
#
# The rate at which the buckets are refilled is automatic and dynamic.
# However the size of the buckets at each stage of the HTB can be tuned
# through the settings below, increasing a bucket size will increase the
# burstiness at which traffic can be queued before new tokens become
# available.
#
# Bucket size is measured in packets.
# global_incoming_burst_capacity: size of the root bucket in the HTB.
global_incoming_burst_capacity = 128
# tunnel_incoming_burst_capacity: bucket size for tunnel ports (GRE, VxLAN)
tunnel_incoming_burst_capacity = 64
# vm_incoming_burst_capacity: bucket size for VM ports
vm_incoming_burst_capacity = 16
# vtep_incoming_burst_capacity: bucket size for VTEP (VxLAN) ports.
vtep_incoming_burst_capacity = 64
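# Taken together (an illustrative reading of the defaults above): packets
# punted to userspace may burst to at most 128 globally, with tunnel and VTEP
# buckets sized at 64 packets and each VM port bucket at 16, before traffic
# must wait for new tokens.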

[haproxy_health_monitor]
# Health monitor is disabled by default. Please change the following value to
# true to activate it.
health_monitor_enable = false