Aggregate jenkins modules into one module.

Put jenkins_master into jenkins::master and jenkins_job_builder into
jenkins::job_builder and jenkins_slave into jenkins::slave.

Change-Id: Icb0e3071894730c17d8f36f49e9d34979d9c568e
Reviewed-on: https://review.openstack.org/11249
Approved: Monty Taylor <mordred@inaugust.com>
Reviewed-by: Monty Taylor <mordred@inaugust.com>
Tested-by: Jenkins
Monty Taylor 2012-08-04 14:30:15 -05:00 committed by Jenkins
parent be854cd76a
commit dab475b8b2
279 changed files with 73 additions and 17152 deletions

.gitignore

@@ -1,6 +0,0 @@
*.swp
*~
*.pyc
applytest
doc/html/
manifests/secrets.pp


@@ -1,4 +0,0 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/openstack-ci-puppet.git


@@ -1,12 +0,0 @@
These are a set of puppet manifests and modules that are currently being
used to manage the OpenStack CI infrastructure.
The main entry point is in manifests/site.pp.
In general, most of the modules here are designed to be run
either in agent or apply mode.
These puppet modules require puppet 2.7 or greater. Additionally, the
site.pp manifest assumes the existence of hiera.
See http://ci.openstack.org for more information.


@@ -1,216 +0,0 @@
# -*- coding: utf-8 -*-
#
# OpenStack CI documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 18 13:42:23 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenStack CI'
copyright = u'2011, Monty Taylor, James Blair and Andrew Hutchings'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "%d.%02d" % (datetime.datetime.now().year, datetime.datetime.now().month)
# The full version, including alpha/beta/rc tags.
release = "%d.%02d.%02d" % (datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackCIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OpenStackCI.tex', u'OpenStack CI Documentation',
u'Monty Taylor and James Blair', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openstackci', u'OpenStack CI Documentation',
[u'Monty Taylor, James Blair and Andrew Hutchings'], 1)
]

File diff suppressed because it is too large.


@@ -1,46 +0,0 @@
.. OpenStack CI documentation master file, created by
sphinx-quickstart on Mon Jul 18 13:42:23 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
OpenStack Continuous Integration
================================
This documentation covers the installation and maintenance of the
Continuous Integration (CI) infrastructure used by OpenStack. It
may be of interest to people who want to help develop this
infrastructure or integrate their tools into it. Some instructions
may be useful to other projects that want to set up similar CI
systems.
OpenStack developers and users do not need to read this documentation.
Instead, see http://wiki.openstack.org/ to learn how to contribute to or
use OpenStack.
Howtos:
.. toctree::
   :maxdepth: 2

   third_party

Contents:

.. toctree::
   :maxdepth: 2

   systems
   jenkins
   gerrit
   puppet
   puppet_modules
   jenkins_jobs
   meetbot
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@@ -1,340 +0,0 @@
:title: Jenkins Configuration
Jenkins
#######
Overview
********
Jenkins is a Continuous Integration system and the central control
system for the orchestration of both pre-merge testing and post-merge
actions such as packaging and publishing of documentation.
Jenkins is a key part of implementing the overall design: all code should be
reviewed and tested before being merged into trunk, and as many of the tasks
around review, testing, merging and release as possible should be automated.
Jenkins is essentially a job queuing system, and everything that is done
through Jenkins can be thought of as having a few discrete components:
* Triggers - What causes a job to be run
* Location - Where do we run a job
* Steps - What actions are taken when the job runs
* Results - What is the outcome of the job
The OpenStack Jenkins can be found at http://jenkins.openstack.org
OpenStack uses :doc:`gerrit` to manage code reviews, which in turn calls
Jenkins to test those reviews.
Authorization
*************
Jenkins is set up to use OpenID in a Single Sign On mode with Launchpad.
This means that all of the user and group information is managed via
Launchpad users and teams. In the Jenkins Security Matrix, a Launchpad team
name can be specified and any members of that team will be granted those
permissions. However, because of the way the information is processed, a
user will need to log in again after changing either team membership on
Launchpad, or that team's authorization in Jenkins, for the new
privileges to take effect.
Integration Testing
*******************
TODO: How others can get involved in testing and integrating with
OpenStack Jenkins.
Rackspace Bare-Metal Testing Cluster
====================================
The CI team maintains a cluster of machines supplied by Rackspace to
perform bare-metal deployment and testing of OpenStack as a whole.
This installation is intended as a reference implementation of just
one of many possible testing platforms, all of which can be integrated
with the OpenStack Jenkins system. This is a cluster of several
physical machines meaning the test environment has access to all of
the native processor features, and real-world networking, including
tagged VLANs.
Each time the trunk repo is updated, a Jenkins job will deploy an
OpenStack cluster using devstack and then run the openstack-test-rax
test suite against the cluster.
Deployment and Testing Process
------------------------------
The cluster deployment is divided into two phases: base operating
system installation, and OpenStack installation. Because the
operating system install takes considerable time (15 to 30 minutes),
has external network resource dependencies (the distribution mirror),
and has no bearing on the outcome of the OpenStack tests themselves,
the process used here effectively snapshots the machines immediately
after the base OS install and before OpenStack is installed. LVM
snapshots and kexec are used to immediately return the cluster to a
newly installed state without incurring the additional time it would
take to install from scratch. The Jenkins testing job invokes the
process starting at :ref:`rax_openstack_install`.
Installation Server Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The CI team runs the Ubuntu Orchestra server (based on cobbler) on our
Jenkins slave node to manage the OS installation on the test machines.
The configuration for the Orchestra server is kept in the CI team's
puppet modules. If you want to set up your own system, Orchestra is
not required; any system capable of performing the following steps is
suitable. However, if you want to stand up a test system as quickly
and simply as possible, you may find it easiest to base your system on
the one the CI team uses. You may use the puppet modules yourself, or
follow the instructions below.
The CI team's Orchestra configuration module is at:
https://github.com/openstack/openstack-ci-puppet/tree/master/modules/orchestra
Install Orchestra
"""""""""""""""""
Install Ubuntu 11.10 (Oneiric) and Orchestra::
sudo apt-get install ubuntu-orchestra-server ipmitool
The install process will prompt you to enter a password for Cobbler.
Have one ready and keep it in a safe place. The procedure here will
not use it, but if you later want to use the Cobbler web interface,
you will need it.
Configure Orchestra
"""""""""""""""""""
Install the following files on the Orchestra server so that it deploys
machines with our LVM/kexec test framework.
We update the dnsmasq.conf cobbler template to add
"dhcp-ignore=tag:!known", and some site-specific network
configuration::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/dnsmasq.template \
-O /etc/cobbler/dnsmasq.template
Our servers need a kernel module blacklisted in order to boot
correctly. If you don't need to blacklist any modules, you should
either create an empty file here, or remove the reference to this file
from the preseed file later::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack_module_blacklist \
-O /var/lib/cobbler/snippets/openstack_module_blacklist
This cobbler snippet uses cloud-init to set up the LVM/kexec
environment and configures TCP syslogging to the installation
server/Jenkins slave::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack_cloud_init \
-O /var/lib/cobbler/snippets/openstack_cloud_init
This snippet holds the mysql root password that will be configured at
install time. It's currently a static string, but you could
dynamically write this file, or simply replace it with something more
secure::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack_mysql_password \
-O /var/lib/cobbler/snippets/openstack_mysql_password
This preseed file manages the OS install on the test nodes. It
includes the snippets installed above::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/openstack-test.preseed \
-O /var/lib/cobbler/kickstarts/openstack-test.preseed
The following sudoers configuration is needed to allow Jenkins to
control cobbler, remove syslog files from the test hosts before
starting new tests, and restart rsyslog::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/orchestra-jenkins-sudoers -O /etc/sudoers.d/orchestra-jenkins
Replace the Orchestra rsyslog config file with a simpler one that logs
all information from remote hosts in one file per host::
wget https://raw.github.com/openstack/openstack-ci-puppet/master/modules/orchestra/files/99-orchestra.conf -O /etc/rsyslog.d/99-orchestra.conf
Make sure the syslog directories exist and restart rsyslog::
mkdir -p /var/log/orchestra/rsyslog/
chown -R syslog.syslog /var/log/orchestra/
restart rsyslog
Add an "OpenStack Test" system profile to cobbler that uses the
preseed file above::
cobbler profile add \
--name=natty-x86_64-ostest \
--parent=natty-x86_64 \
--kickstart=/var/lib/cobbler/kickstarts/openstack-test.preseed \
--kopts="priority=critical locale=en_US"
Add each of your systems to cobbler with a command similar to this
(you may need different kernel options)::
cobbler system add \
--name=baremetal1 \
--hostname=baremetal1 \
--profile=natty-x86_64-ostest \
--mac=00:11:22:33:44:55 \
--power-type=ipmitool \
--power-user=IPMI_USERNAME \
--power-pass=IPMI_PASS \
--power-address=IPMI_IP_ADDR \
--ip-address=SYSTEM_IP_ADDRESS \
--subnet=SYSTEM_SUBNET \
--kopts="netcfg/choose_interface=auto netcfg/dhcp_timeout=60 auto=true priority=critical"
When complete, have cobbler write out its configuration files::
cobbler sync
Set Up Jenkins Jobs
"""""""""""""""""""
We have Jenkins jobs to handle all of the tasks after the initial
Orchestra configuration so that we can easily run them at any time.
This includes the OS installation on the test nodes; although we
don't run that often (because the state is preserved in an LVM
snapshot), we may want to change the configuration used and make a new
snapshot. In that case we just need to trigger the Jenkins job again.
The Jenkins job that kicks off the operating system installation calls
the "baremetal-os-install.sh" script from the openstack-ci repo:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/baremetal-os-install.sh
That script instructs cobbler to install the OS on each of the test
nodes.
To speed up the devstack installation and avoid excessive traffic to
the PyPI server, we build a pip package cache on the installation
server. That is also an infrequent task that we configure as a
Jenkins job, which calls:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/update-pip-cache.sh
That builds a pip package cache that the test script later copies to
the test servers for use by devstack.
Run those two jobs, and once complete, the test nodes are ready to go.
This is the end of the operating system installation, and the system
is currently in the pristine state that will be used by the test
procedure (which is stored in the LVM volume "orig_root").
.. _rax_openstack_install:
OpenStack Installation
~~~~~~~~~~~~~~~~~~~~~~
When the deployment and integration test job runs, it does the
following, each time starting from the pristine state described at the
end of the previous section.
Reset the Test Nodes
""""""""""""""""""""
The Jenkins deployment and test job first runs the deployment script:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/baremetal-deploy.sh
That script invokes the following script on each host to reset it to the
pristine state:
https://github.com/openstack/openstack-ci/blob/master/slave_scripts/lvm-kexec-reset.sh
Because kexec is in use, resetting the environment and rebooting into
the pristine state takes only about 3 seconds.
The deployment script then removes the syslog files from the previous
run and restarts rsyslog to re-open them. Once the first test host
finishes booting and brings up its network, OpenStack installation
starts.
Run devstack on the Test Nodes
""""""""""""""""""""""""""""""
Devstack's build_bm_multi script is run, which invokes devstack on
each of the test nodes: first on the "head" node, which runs all of
the OpenStack services for the remaining "compute" nodes.
Run Test Suite
""""""""""""""
Once devstack is complete, the test suite is run. All logs from the
test nodes should be sent via syslog to the Jenkins slave, and at the
end of the test, the logs are archived with the job for developers to
inspect in case of problems.
Cluster Configuration
---------------------
Here are the configuration parameters of the CI team's test cluster.
The cluster is currently divided into three mini-clusters so that
independent Jenkins jobs can run in parallel on the different
clusters.
VLANs
~~~~~
+----+--------------------------------+
|VLAN| Description |
+====+================================+
|90 | Native VLAN |
+----+--------------------------------+
|91 | Internal cluster communication |
| | network: 192.168.91.0/24 |
+----+--------------------------------+
|92 | Public Internet (fake) |
| | network: 192.168.92.0/24 |
+----+--------------------------------+
Servers
~~~~~~~
The servers are located on the Rackspace network, only accessible via
VPN.
+-----------+--------------+---------------+
| Server | Primary IP | Management IP |
+===========+==============+===============+
|deploy-rax | 10.14.247.36 | 10.14.247.46 |
+-----------+--------------+---------------+
|baremetal1 | 10.14.247.37 | 10.14.247.47 |
+-----------+--------------+---------------+
|baremetal2 | 10.14.247.38 | 10.14.247.48 |
+-----------+--------------+---------------+
|baremetal3 | 10.14.247.39 | 10.14.247.49 |
+-----------+--------------+---------------+
|baremetal4 | 10.14.247.40 | 10.14.247.50 |
+-----------+--------------+---------------+
|baremetal5 | 10.14.247.41 | 10.14.247.51 |
+-----------+--------------+---------------+
|baremetal6 | 10.14.247.42 | 10.14.247.52 |
+-----------+--------------+---------------+
|baremetal7 | 10.14.247.43 | 10.14.247.53 |
+-----------+--------------+---------------+
|baremetal8 | 10.14.247.44 | 10.14.247.54 |
+-----------+--------------+---------------+
|baremetal9 | 10.14.247.45 | 10.14.247.55 |
+-----------+--------------+---------------+
deploy-rax
The deployment server and Jenkins slave. It deploys the servers
using Orchestra and Devstack, and runs the test framework. It
should not run any OpenStack components, but we can install
libraries or anything else needed to run tests.
baremetal1, baremetal4, baremetal7
Configured as "head" nodes to run nova, mysql, and glance. Each one
is the head node of a three-node cluster including the two compute
nodes following it.
baremetal2-3, baremetal5-6, baremetal8-9
Configured as compute nodes for each of the three mini-clusters.


@@ -1,184 +0,0 @@
Jenkins Job Builder
===================
Overview
--------
In order to make the process of managing hundreds of Jenkins jobs easier, a
Python-based utility was designed to take YAML-based configurations and convert
them into jobs that are injected into Jenkins.
Adding a project
----------------
The YAML scripts to make this work are stored in the ``openstack-ci-puppet``
repository as ``modules/jenkins_jobs/files/projects/site/project.yaml``,
where ``site`` is either `openstack` or `stackforge` and ``project``
is the name of the project the YAML file is for.
Once the YAML file is added the puppet module needs to be told that the project
is there. For example:
.. code-block:: ruby
   :linenos:

   class { "jenkins_jobs":
     site     => "stackforge",
     projects => ['reddwarf', 'ceilometer']
   }
In this example the YAML files for `reddwarf` and `ceilometer` in the
`stackforge` projects directory will be executed.
YAML Format
-----------
The bare minimum YAML needs to look like this:
.. code-block:: yaml
   :linenos:

   ---
   modules:
     - properties
     - scm
     - assignednode
     - trigger_none
     - builders
     - publisher_none

   main:
     name: 'job-name'
     review_site: 'review.stackforge.org'
     github_org: 'stackforge'
     project: 'project'
     authenticatedBuild: 'false'
     disabled: 'false'
or for a templated project:
.. code-block:: yaml
   :linenos:

   project:
     template: 'python_jobs'
     values:
       name: 'cinder'
       disabled: 'false'
       github_org: 'openstack'
       review_site: 'review.openstack.org'
       publisher_site: 'nova.openstack.org'
The first example starts with ``---``; this signifies the start of a job, and
there can be multiple jobs per project file. The file does not need to start
with the ``---``, but jobs do need to be separated by it. Each YAML file can
contain any combination of templated or normal jobs, parsed as separate YAML
documents (see the sketch below).
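For illustration, this multi-document layout is exactly what a standard YAML
multi-document loader produces; a minimal sketch assuming PyYAML (the real
parsing lives in ``jenkins_jobs.py``, and the file name here is just an
example):

.. code-block:: python

   import yaml

   with open('projects/openstack/cinder.yml') as f:
       for job in yaml.safe_load_all(f):
           # A normal job carries 'modules' and 'main' keys; a templated
           # project carries a top-level 'project' key instead.
           print(sorted(job.keys()))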
In the first example the ``modules`` entry is an array of modules that should be
loaded for this job. Modules are located in the
``modules/jenkins_jobs/files/modules/`` directory and are python scripts to
generate the required XML. Each module has a comment near the top showing the
required YAML to support that module. The following modules are required to
generate correct XML that Jenkins will accept:
* properties (supplies the <properties> XML data)
* scm (supplies the <scm> XML data; required even if SCM is not used)
* trigger_* (a trigger module is required)
* builders
* publisher_* (a publisher module is required)
Each job also requires a ``main`` section which holds the main data for the
job; inside this there is:
* name - the name of the job
* review_site - review.openstack.org or review.stackforge.org
* github_org - the parent of the github branch for the project (typically `openstack` or `stackforge`)
* project - the name of the project
* authenticatedBuild - whether or not you need to be authenticated to hit the
build button
* disabled - whether or not this job should be disabled
In the templated example there is the ``project`` tag to specify that this is
a templated project. The ``template`` value specifies a template file found in
the ``modules/jenkins_jobs/files/templates`` directory. The template will look
like a regular set of jobs but contain values in caps surrounded by '@' symbols.
The template process takes the parameters specified in the ``values`` section
and replaces the values surrounded by the '@' symbol.
As an example in the template:
.. code-block:: yaml

   main:
     name: 'gate-@NAME@-pep8'
Using the above example of a templated job the ``@NAME@`` would be replaced with
``cinder``.
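The substitution itself is simple string replacement; a hedged sketch of the
idea in Python (not the tool's actual code):

.. code-block:: python

   import re

   def expand(template_text, values):
       # Replace each @KEY@ with the matching entry from the values section.
       return re.sub(r'@([A-Z_]+)@',
                     lambda m: str(values[m.group(1).lower()]),
                     template_text)

   print(expand("name: 'gate-@NAME@-pep8'", {'name': 'cinder'}))
   # -> name: 'gate-cinder-pep8'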
Testing a Job
-------------
Once a new YAML file has been created, its output can be tested by using the
``jenkins_jobs.py`` script directly. For example:
.. code-block:: bash
$ python jenkins_jobs.py test projects/openstack/cinder.yml
This will spit out the XML that would normally be sent directly to Jenkins.
Job Caching
-----------
The Jenkins Jobs builder maintains a special YAML file in
``~/.jenkins_jobs_cache.yml``. This contains an MD5 hash of every XML file
that it generates. If it finds that the XML has changed, it sends it
to Jenkins; otherwise the job is skipped. If a job is accidentally deleted
from Jenkins, this file should be modified or removed so the job is resent.
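The check amounts to comparing an MD5 of the generated XML against the cached
value; a sketch of that logic, assuming the cache file is a simple YAML
mapping of job name to hash:

.. code-block:: python

   import hashlib
   import os

   import yaml

   CACHE_FILE = os.path.expanduser('~/.jenkins_jobs_cache.yml')

   def needs_update(job_name, xml):
       """Return True if this job's XML differs from the cached MD5."""
       cache = {}
       if os.path.exists(CACHE_FILE):
           with open(CACHE_FILE) as f:
               cache = yaml.safe_load(f) or {}
       digest = hashlib.md5(xml.encode('utf-8')).hexdigest()
       if cache.get(job_name) == digest:
           return False          # unchanged, skip the upload
       cache[job_name] = digest
       with open(CACHE_FILE, 'w') as f:
           yaml.safe_dump(cache, f)
       return True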
Sending a Job to Jenkins
------------------------
The Jenkins Jobs builder talks to Jenkins using the Jenkins API. This means
that it can create and modify jobs directly without the need to restart or
reload the Jenkins server. It also means that Jenkins will verify the XML and
cause the Jenkins Jobs builder to fail if there is a problem.
For this to work a configuration file is needed. This needs to be stored in
``/root/secret-files/jenkins_jobs.ini`` and puppet will automatically put it in
the right place. The format for this file is as follows:
.. code-block:: ini
[jenkins]
user=username
password=password
url=jenkins_url
The password can be obtained by logging into Jenkins, clicking on your
username in the top-right, then clicking `Configure` and `Show API Token`.
This API Token is your password for the API.
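As a rough sketch of what sending a job over the API looks like using only
the standard library (``createItem`` and ``job/<name>/config.xml`` are
Jenkins' stock remote API endpoints; the builder's real code also handles
updates and errors):

.. code-block:: python

   import base64
   import urllib2

   def push_new_job(base_url, user, password, name, xml):
       # Updating an existing job would POST the same XML to
       # base_url + '/job/' + name + '/config.xml' instead.
       req = urllib2.Request('%s/createItem?name=%s' % (base_url, name), xml)
       req.add_header('Authorization',
                      'Basic ' + base64.b64encode('%s:%s' % (user, password)))
       req.add_header('Content-Type', 'text/xml')
       urllib2.urlopen(req)  # Jenkins validates the XML and raises on failure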
Adding a Module
---------------
Modules need to contain a class with the same name as the filename. The basic
layout is:
.. code-block:: python

   import xml.etree.ElementTree as XML

   class my_module(object):
       def __init__(self, data):
           self.data = data

       def gen_xml(self, xml_parent):
           pass  # modify xml_parent in place here
The ``__init__`` function will be provided with ``data`` which is a Python
dictionary representing the YAML data for the job.
The ``gen_xml`` function will be provided with ``xml_parent`` which is an
XML ElementTree object to be modified.
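To see how these pieces fit together, here is a toy module (not one of the
real modules shipped with the tool) and the way the builder might drive it;
the scaffolding around it is simplified for illustration:

.. code-block:: python

   import xml.etree.ElementTree as XML

   class sample_module(object):
       def __init__(self, data):
           self.data = data

       def gen_xml(self, xml_parent):
           # Contribute this module's element under the job root.
           node = XML.SubElement(xml_parent, 'description')
           node.text = self.data['main']['name']

   root = XML.Element('project')
   sample_module({'main': {'name': 'job-name'}}).gen_xml(root)
   print(XML.tostring(root))
   # <project><description>job-name</description></project>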


@@ -1,89 +0,0 @@
Meetbot
==============
Overview
--------
The OpenStack CI team runs a slightly modified
`Meetbot <http://wiki.debian.org/MeetBot>`_ to log IRC channel activity and
meeting minutes. Meetbot is a plugin for
`Supybot <http://sourceforge.net/projects/supybot/>`_ which adds meeting
support features to the Supybot IRC bot.
Supybot
-------
In order to run Meetbot you will need to get Supybot. You can find the latest
release `here <http://sourceforge.net/projects/supybot/files/>`_. Once you have
extracted the release you will want to read the ``INSTALL`` and
``doc/GETTING_STARTED`` files. Those two files should have enough information to
get you going, but there are other goodies in ``doc/``.
Once you have Supybot installed you will need to configure a bot. The
``supybot-wizard`` command can get you started with a basic config, or you can
have Puppet do the heavy lifting. The OpenStack CI Meetbot Puppet module
creates a configuration; documentation for that module is at
:ref:`Meetbot_Puppet_Module`.
One important config setting is ``supybot.reply.whenAddressedBy.chars``, which
sets the prefix character for this bot. This should be set to something other
than ``#`` as ``#`` will conflict with Meetbot (you can leave the setting blank
if you don't want a prefix character).
Meetbot
-------
The OpenStack CI Meetbot fork can be found at
https://github.com/openstack-ci/meetbot. Manual installation of the Meetbot
plugin is straightforward and documented in that repository's README.
OpenStack CI installs and configures Meetbot through Puppet. Documentation for
the Puppet module that does that can be found at :ref:`Meetbot_Puppet_Module`.
Voting
^^^^^^
The OpenStack CI Meetbot fork adds simple voting features. After a meeting has
been started a meeting chair can begin a voting block with the ``#startvote``
command. The command takes two arguments, a question posed to voters (ending
with a ``?``), and the valid voting options. If the second argument is missing
the default options are "Yes" and "No". For example:
``#startvote Should we vote now? Yes, No, Maybe``
Meeting participants vote using the ``#vote`` command. This command takes a
single argument, which should be one of the options listed for voting by the
``#startvote`` command. For example:
``#vote Yes``
Note that you can vote multiple times, but only your last vote will count.
One can check the current vote tallies using the ``#showvote`` command, which
takes no arguments. This will list the number of votes and voters for each item
that has votes.
When the meeting chair(s) are ready to stop the voting process they can issue
the ``#endvote`` command, which takes no arguments. Doing so will report the
voting results and log these results in the meeting minutes.
A somewhat contrived voting example:
::
foo | #startvote Should we vote now? Yes, No, Maybe
meetbot | Begin voting on: Should we vote now? Valid vote options are Yes, No, Maybe.
meetbot | Vote using '#vote OPTION'. Only your last vote counts.
foo | #vote Yes
bar | #vote Absolutely
meetbot | bar: Absolutely is not a valid option. Valid options are Yes, No, Maybe.
bar | #vote Yes
bar | #showvote
meetbot | Yes (2): foo, bar
foo | #vote No
foo | #showvote
meetbot | Yes (1): bar
meetbot | No (1): foo
foo | #endvote
meetbot | Voted on "Should we vote now?" Results are
meetbot | Yes (1): bar
meetbot | No (1): foo


@@ -1,147 +0,0 @@
Puppet Master
=============
Overview
--------
Puppet agent is a mechanism used to pull puppet manifests and configuration
from a centralized master. This means there is only one place that needs to
hold secure information such as passwords, and only one location for the git
repo holding the modules.
Puppet Master
-------------
The puppet master is set up using a combination of Apache and mod_passenger to
ship the data to the clients. To install this:
.. code-block:: bash
sudo apt-get install puppet puppetmaster-passenger
Files for puppet master are stored in a git repo clone at
``/opt/openstack-ci-puppet``. We have a ``root`` cron job that
automatically populates these from our puppet git repository as follows:
.. code-block:: bash
*/15 * * * * sleep $((RANDOM%600)) && cd /opt/openstack-ci-puppet && /usr/bin/git pull -q
The ``/etc/puppet/puppet.conf`` file then needs updating to point to the
manifest and modules as follows:
.. code-block:: ini
[master]
# These are needed when the puppetmaster is run by passenger
# and can safely be removed if webrick is used.
ssl_client_header = SSL_CLIENT_S_DN
ssl_client_verify_header = SSL_CLIENT_VERIFY
manifestdir=/opt/openstack-ci-puppet/manifests
modulepath=/opt/openstack-ci-puppet/modules
manifest=$manifestdir/site.pp
Hiera
-----
Hiera is used to maintain secret information on the puppetmaster.
We want to install hiera from puppetlabs' apt repo, but we don't want to get
on the puppet upgrade train - so the process is as follows:
.. code-block:: bash
echo "deb http://apt.puppetlabs.com precise devel" > /etc/apt/sources.list.d/puppetlabs.list
apt-get update
apt-get install hiera hiera-puppet
rm /etc/apt/sources.list.d/puppetlabs.list
apt-get update
Hiera uses a systemwide configuration file in ``/etc/puppet/hiera.yaml``
which tells it where to find subsequent configuration files.
.. code-block:: yaml

   ---
   :hierarchy:
     - %{operatingsystem}
     - common
   :backends:
     - yaml
   :yaml:
     :datadir: '/etc/puppet/hieradata/%{environment}'
This setup supports multiple environments. The two environments that
OpenStack CI uses are ``production`` and ``development``; ``production``
is the default and is used when nothing else is specified.
Then the configuration needs to be placed into common.yaml in
``/etc/puppet/hieradata/production`` and ``/etc/puppet/hieradata/development``.
The values are simple key-value pairs in yaml format.
Adding a node
-------------
On the new server (for example, review.openstack.org) connecting to the puppet master:
.. code-block:: bash
sudo apt-get install puppet
Then edit the ``/etc/default/puppet`` file to change the start variable:
.. code-block:: ini
# Start puppet on boot?
START=yes
The node then needs to be configured to set a fixed hostname and the hostname
of the puppet master with the following additions to ``/etc/puppet/puppet.conf``:
.. code-block:: ini
[main]
server=ci-puppetmaster.openstack.org
certname=review.openstack.org
The cert signing process needs to be started with:
.. code-block:: bash
sudo puppet agent --test
This will make a request to the puppet master to have its SSL cert signed.
On the puppet master:
.. code-block:: bash
sudo puppet cert list
You should get a list of entries similar to the one below::
review.openstack.org (44:18:BB:DF:08:50:62:70:17:07:82:1F:D5:70:0E:BF)
If you see the new node there you can sign its cert on the puppet master with:
.. code-block:: bash
sudo puppet cert sign review.openstack.org
Finally on the puppet agent you need to start the agent daemon:
.. code-block:: bash
sudo service puppet start
Now that it is signed the puppet agent will execute any instructions for its
node on the next run (default is every 30 minutes). You can trigger this
earlier by restarting the puppet service on the agent node.
Important Notes
---------------
#. Make sure the site manifest **does not** include the puppet cron job; this
conflicts with puppet master and can cause issues. The initial puppet run
that creates users should be done using the puppet agent configuration above.
#. If you do not see the cert in the master's cert list the agent's
``/var/log/syslog`` should have an entry showing you why.


@@ -1,387 +0,0 @@
Puppet Modules
==============
Overview
--------
Much of the OpenStack project infrastructure is deployed and managed using
puppet.
The OpenStack CI team manages a number of custom puppet modules, outlined in
this document.
Doc Server
----------
The doc_server module configures nginx [3]_ to serve the documentation for
several specified OpenStack projects. At the moment, to add a site you need
to edit ``modules/doc_server/manifests/init.pp`` and add a line as
follows:
.. code-block:: ruby
:linenos:
doc_server::site { "swift": }
In this example nginx will be configured to serve ``swift.openstack.org``
from ``/srv/docs/swift`` and ``swift.openstack.org/tarballs/`` from
``/srv/tarballs/swift``
Lodgeit
-------
The lodgeit module installs and configures lodgeit [1]_ on required servers to
be used as paste installations. For OpenStack we use
`a fork <https://github.com/openstack-ci/lodgeit>`_ of this which is based on
one with bugfixes maintained by
`dcolish <https://bitbucket.org/dcolish/lodgeit-main>`_ but adds back missing
anti-spam features required by OpenStack.
Puppet will configure lodgeit to use drizzle [2]_ as a database backend,
nginx [3]_ as a front-end proxy and upstart scripts to run the lodgeit
instances. It will store and maintain a local branch of the mercurial
repository for lodgeit in ``/tmp/lodgeit-main``.
To use this module you need to add something similar to the following in the
main ``site.pp`` manifest:
.. code-block:: ruby
   :linenos:

   node "paste.openstack.org" {
     include openstack_server
     include lodgeit

     lodgeit::site { "openstack":
       port  => "5000",
       image => "header-bg2.png"
     }

     lodgeit::site { "drizzle":
       port => "5001"
     }
   }
In this example we include the lodgeit module which will install all the
pre-requisites for Lodgeit as well as creating a checkout ready.
The ``lodgeit::site`` calls create the individual paste sites.
The name in the ``lodgeit::site`` call will be used to determine the URL, path
and name of the site. So "openstack" will create ``paste.openstack.org``,
place it in ``/srv/lodgeit/openstack`` and give it an upstart script called
``openstack-paste``. It will also change the h1 tag to say "Openstack".
The port number given needs to be a unique port which the lodgeit service will
run on. The puppet script will then configure nginx to proxy to that port.
Finally if an image is given that will be used instead of text inside the h1
tag of the site. The images need to be stored in the ``modules/lodgeit/files``
directory.
Lodgeit Backups
^^^^^^^^^^^^^^^
The lodgeit module will automatically create a git repository in ``/var/backups/lodgeit_db``. Inside this every site will have its own SQL file, for example "openstack" will have a file called ``openstack.sql``. Every day a cron job will update the SQL file (one job per file) and commit it to the git repository.
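Each site's daily job amounts to a dump-and-commit; a sketch of the idea in
Python (the module itself generates a cron entry, and the command names here
assume ``drizzledump`` and ``git`` are available):

.. code-block:: python

   import subprocess

   BACKUP_DIR = '/var/backups/lodgeit_db'

   def backup_site(name):
       # Dump the site's database to its own SQL file, e.g. openstack.sql.
       with open('%s/%s.sql' % (BACKUP_DIR, name), 'w') as out:
           subprocess.check_call(['drizzledump', name], stdout=out)
       subprocess.check_call(['git', 'add', name + '.sql'], cwd=BACKUP_DIR)
       # git commit returns nonzero when nothing changed; the real cron
       # job tolerates that case.
       subprocess.check_call(['git', 'commit', '-m', 'Backup of ' + name],
                             cwd=BACKUP_DIR)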
.. note::
Ideally the SQL files would have a row on every line to keep the diffs stored
in git small, but ``drizzledump`` does not yet support this.
Planet
------
The planet module installs Planet Venus [4]_ along with required dependencies
on a server. It also configures specified planets based on the options given.
Planet Venus works by having a cron job which creates static files. In this
module the static files are served using nginx [3]_.
To use this module you need to add something similar to the following into the
main ``site.pp`` manifest:
.. code-block:: ruby
   :linenos:

   node "planet.openstack.org" {
     include planet

     planet::site { "openstack":
       git_url => "https://github.com/openstack/openstack-planet.git"
     }
   }
In this example the name "openstack" is used to create the site
``planet.openstack.org``. The site will be served from
``/srv/planet/openstack/`` and the checkout of the ``git_url`` supplied will
be maintained in ``/var/lib/planet/openstack/``.
This module will also create a cron job to pull new feed data at 3 minutes past each hour.
The ``git_url`` parameter needs to point to a git repository which stores the
planet.ini configuration for the planet (which stores a list of feeds) and any required theme data. This will be pulled every time puppet is run.
.. _Meetbot_Puppet_Module:
Meetbot
-------
The meetbot module installs and configures meetbot [5]_ on a server. The
meetbot version installed by this module is pulled from the
`Openstack CI fork <https://github.com/openstack-ci/meetbot/>`_ of the project.
It also configures nginx [3]_ to be used for accessing the public IRC logs of
the meetings.
To use this module simply add a section to the site manifest as follows:
.. code-block:: ruby
   :linenos:

   node "eavesdrop.openstack.org" {
     include openstack_cron
     class { 'openstack_server':
       iptables_public_tcp_ports => [80]
     }
     include meetbot

     meetbot::site { "openstack":
       nick     => "openstack",
       network  => "FreeNode",
       server   => "chat.us.freenode.net:7000",
       url      => "eavesdrop.openstack.org",
       channels => "#openstack #openstack-dev #openstack-meeting",
       use_ssl  => "True"
     }
   }
You will also need a file ``/root/secret-files/name-nickserv.pass`` where `name`
is the name specified in the call to the module (`openstack` in this case).
Each call to meetbot::site will set up a meetbot in ``/var/lib/meetbot``
under a subdirectory named after the call to the module. It will also
configure nginx to serve that site when the ``/meetings`` directory is
specified in the URL.
The puppet module also creates startup scripts for meetbot and will ensure that
it is running on each puppet run.
Gerrit
------
The Gerrit puppet module configures the basic needs of a Gerrit server. It does
not (yet) install Gerrit itself and mostly deals with the configuration files
and skinning of Gerrit.
Using Gerrit
^^^^^^^^^^^^
Gerrit is set up when the following class call is added to a node in the site
manifest:
.. code-block:: ruby

   class { 'gerrit':
     canonicalweburl => "https://review.openstack.org/",
     email           => "review@openstack.org",
     github_projects => [
       'openstack/nova',
       'stackforge/MRaaS',
     ],
     logo => 'openstack.png'
   }
Most of these options are self-explanatory. ``github_projects`` is a list of
all projects in GitHub which are managed by the gerrit server.
Skinning
^^^^^^^^
Gerrit is skinned using files supplied by the puppet module. The skin is
automatically applied as soon as the module is executed. In the site manifest
setting the logo is important:
.. code-block:: ruby

   class { 'gerrit':
     ...
     logo => 'openstack.png'
   }
This specifies a PNG file which must be stored in the ``modules/gerrit/files/``
directory.
Jenkins Master
--------------
The Jenkins Master puppet module installs and supplies a basic Jenkins
configuration. It also supplies a skin to Jenkins to make it look more like an
OpenStack site. It does not (yet) install the additional Jenkins plugins used
by the OpenStack project.
Using Jenkins Master
^^^^^^^^^^^^^^^^^^^^
In the site manifest a node can be configured to be a Jenkins master simply by
adding the class call below:
.. code-block:: ruby

   class { 'jenkins_master':
     site        => 'jenkins.openstack.org',
     serveradmin => 'webmaster@openstack.org',
     logo        => 'openstack.png'
   }
The ``site`` and ``serveradmin`` parameters are used to configure Apache. In
this instance you will also need the following files for Apache to start::
/etc/ssl/certs/jenkins.openstack.org.pem
/etc/ssl/private/jenkins.openstack.org.key
/etc/ssl/certs/intermediate.pem
The ``jenkins.openstack.org`` is replaced by the setting in the ``site``
parameter.
Skinning
^^^^^^^^
The Jenkins skin uses the `Simple Theme Plugin
<http://wiki.jenkins-ci.org/display/JENKINS/Simple+Theme+Plugin>`_ for Jenkins.
The puppet module will install and configure most aspects of the skin
automatically, with a few adjustments needed.
In the site.pp file the ``logo`` parameter is important:
.. code-block:: ruby

   class { 'jenkins_master':
     ...
     logo => 'openstack.png'
   }
This relates to a PNG file that must be in the ``modules/jenkins_master/files/``
directory.
Once puppet installs this and the plugin is installed you need to go into
``Manage Jenkins -> Configure System`` and look for the ``Theme`` heading.
Assuming we are skinning the main OpenStack Jenkins site, in the ``CSS`` box
enter
``https://jenkins.openstack.org/plugin/simple-theme-plugin/openstack.css`` and
in the ``JS`` box enter
``https://jenkins.openstack.org/plugin/simple-theme-plugin/openstack.js``.
Etherpad Lite
-------------
This Puppet module installs Etherpad Lite [6]_ and its dependencies (including
node.js). This Puppet module also configures Etherpad Lite to be started at
boot with Nginx running in front of it as a reverse proxy and MySQL running as
the database backend.
Using this module is straightforward; you simply need to include a few classes.
However, there are some limitations to be aware of, which are described below.
The includes you need are:
::
include etherpad_lite # Acts like a package manager and installs things
include etherpad_lite::nginx # Sets up Nginx to reverse proxy Etherpad Lite
include etherpad_lite::site # Configures Etherpad Lite
include etherpad_lite::mysql # Configures MySQL DB backend for Etherpad Lite
These classes are parameterized and provide some configurability, but should
all work together when instantiated with their defaults.
Config File
^^^^^^^^^^^
Because the Etherpad Lite configuration file contains a database password it is
not directly managed by Puppet. Instead Puppet expects the configuration file
to be at ``/root/secret-files/etherpad-lite_settings.json`` on the Puppet
master (if running in master/agent setup) or on the server itself if running
``puppet apply``.
MySQL will be configured by Puppet to listen on TCP 3306 of localhost and a
database called ``etherpad-lite`` will be created for user ``eplite``. Also,
this module does install the Abiword package. Knowing this, a good template for
your config is:
::
/*
This file must be valid JSON. But comments are allowed
Please edit settings.json, not settings.json.template
*/
{
//Ip and port which etherpad should bind at
"ip": "127.0.0.1",
"port" : 9001,
//The Type of the database. You can choose between dirty, sqlite and mysql
//You should use mysql or sqlite for anything else than testing or development
"dbType" : "mysql",
//the database specific settings
"dbSettings" : {
"user" : "eplite",
"host" : "localhost",
"password": "changeme",
"database": "etherpad-lite"
},
//the default text of a pad
"defaultPadText" : "Welcome to Etherpad Lite!\n\nThis pad text is synchronized as you type, so that everyone viewing this page sees the same text. This allows you to collaborate seamlessly on documents!\n\nEtherpad Lite on Github: http:\/\/j.mp/ep-lite\n",
/* Users must have a session to access pads. This effectively allows only group pads to be accessed. */
"requireSession" : false,
/* Users may edit pads but not create new ones. Pad creation is only via the API. This applies both to group pads and regular pads. */
"editOnly" : false,
/* if true, all css & js will be minified before sending to the client. This will improve the loading performance massivly,
but makes it impossible to debug the javascript/css */
"minify" : true,
/* How long may clients use served javascript code? Without versioning this
is may cause problems during deployment. */
"maxAge" : 21600000, // 6 hours
/* This is the path to the Abiword executable. Setting it to null, disables abiword.
Abiword is needed to enable the import/export of pads*/
"abiword" : "/usr/bin/abiword",
/* This setting is used if you need http basic auth */
// "httpAuth" : "user:pass",
/* The log level we are using, can be: DEBUG, INFO, WARN, ERROR */
"loglevel": "INFO"
}
Don't forget to change the password if you copy this configuration. Puppet will
grep that password out of the config and use it to set the password for the
MySQL eplite user.
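For illustration, the same extraction expressed as a short Python sketch (the
module itself does this with shell tooling; this is just the idea):

.. code-block:: python

   import re

   with open('/root/secret-files/etherpad-lite_settings.json') as f:
       settings = f.read()
   # Naive, grep-style extraction of the "password" value from the config.
   password = re.search(r'"password"\s*:\s*"([^"]*)"', settings).group(1)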
Nginx
^^^^^
The reverse proxy is configured to talk to Etherpad Lite over localhost:9001.
Nginx listens on TCP 443 for HTTPS connections. Because HTTPS is used you will
need SSL certificates. These files are not directly managed by Puppet (again
because of the sensitive nature of these files), but Puppet will look for
``/root/secret-files/eplite.crt`` and ``/root/secret-files/eplite.key`` and
copy them to ``/etc/nginx/ssl/eplite.crt`` and ``/etc/nginx/ssl/eplite.key``,
which is where Nginx expects them to be.
MySQL
^^^^^
MySQL is configured by the Puppet module to allow user ``eplite`` to use
database ``etherpad-lite``. If you want backups for the ``etherpad-lite``
database you can include ``etherpad_lite::backup``. By default this will backup
the ``etherpad-lite`` DB daily and keep a rotation of 30 days of backups.
.. rubric:: Footnotes
.. [1] `Lodgeit homepage <http://www.pocoo.org/projects/lodgeit/>`_
.. [2] `Drizzle homepage <http://www.drizzle.org/>`_
.. [3] `nginx homepage <http://nginx.org/en/>`_
.. [4] `Planet Venus homepage <http://intertwingly.net/code/venus/docs/index.html>`_
.. [5] `Meetbot homepage <http://wiki.debian.org/MeetBot>`_
.. [6] `Etherpad Lite homepage <https://github.com/Pita/etherpad-lite>`_


@@ -1,121 +0,0 @@
:title: Infrastructure Systems
Infrastructure Systems
######################
The OpenStack CI team maintains a number of systems that are critical
to the operation of the OpenStack project. At the time of writing,
these include:
* Gerrit (review.openstack.org)
* Jenkins (jenkins.openstack.org)
* community.openstack.org
Additionally the team maintains the project sites on Launchpad and
GitHub. The following policies have been adopted to ensure the
continued and secure operation of the project.
SSH Access
**********
For any of the systems managed by the CI team, the following practices
must be observed for SSH access:
* SSH access is only permitted with SSH public/private key
authentication.
* Users must use a strong passphrase to protect their private key. A
passphrase of several words (at least one of which is not in a
dictionary) or a random string of at least 16 characters is
advised.
* To mitigate the inconvenience of using a long passphrase, users may
want to use an SSH agent so that the passphrase is only requested
once per desktop session.
* Users' private keys must never be stored anywhere except their own
workstation(s). In particular, they must never be stored on any
remote server.
* If users need to 'hop' from a server or bastion host to another
machine, they must not copy a private key to the intermediate
machine (see above). Instead SSH agent forwarding may be used.
However, due to the potential for a compromised intermediate machine
to ask the agent to sign requests without the user's knowledge, in
this case only an SSH agent that interactively prompts the user
each time a signing request is received (i.e. ssh-agent, but not
gnome-keyring) should be used, and the SSH keys should be added with
the confirmation constraint ('ssh-add -c').
* The number of SSH keys that are configured to permit access to
OpenStack machines should be kept to a minimum.
* OpenStack CI machines must use puppet to centrally manage and
configure user accounts, and the SSH authorized_keys files from the
openstack-ci-puppet repository.
* SSH keys should be periodically rotated (at least once per year).
During rotation, a new key can be added to puppet for a time, and
then the old one removed. Be sure to run puppet on the backup
servers to make sure they are updated.
Backups
*******
Off-site backups are made to two servers:
* ci-backup-rs-ord.openstack.org
* ci-backup-hp-az1.openstack.org
Puppet is used to perform the initial configuration of those machines,
but to protect them from unauthorized access in case access to the
puppet git repo is compromised, it is not run in agent or in cron mode
on them. Instead, it should be manually run when changes are made
that should be applied to the backup servers.
To start backing up a server, some commands need to be run manually on
both the backup server, and the server to be backed up. On the server
to be backed up::
ssh-keygen -t rsa -f /root/.ssh/id_rsa -N ""
And then ``cat /root/.ssh/id_rsa.pub`` for use later.
On the backup servers::
sudo su -
BUPUSER=bup-<short-servername> # eg, bup-jenkins-dev
useradd -r $BUPUSER -s /bin/bash -m
cd /home/$BUPUSER
mkdir .ssh
cat >.ssh/authorized_keys
and add this to the authorized_keys file::
command="BUP_DEBUG=0 BUP_FORCE_TTY=3 bup server",no-port-forwarding,no-agent-forwarding,no-X11-forwarding,no-pty <ssh key from earlier>
Switching back to the server to be backed up, run::
ssh $BUPUSER@ci-backup-rs-ord.openstack.org
ssh $BUPUSER@ci-backup-hp-az1.openstack.org
And verify the host key. Add the "backup" class in puppet to the server
to be backed up.
GitHub Access
*************
To ensure that code review and testing are not bypassed in the public
Git repositories, only Gerrit will be permitted to commit code to
OpenStack repositories. Because GitHub always allows project
administrators to commit code, accounts that have access to manage the
GitHub projects necessarily will have commit access to the
repositories. Therefore, to avoid inadvertent commits to the public
repositories, unique administrative-only accounts must be used to
manage the OpenStack GitHub organization and projects. These accounts
will not be used to check out or commit code for any project.
Launchpad Teams
***************
Each OpenStack project should have the following teams on Launchpad:
* foo -- contributors to project 'foo'
* foo-core -- core developers
* foo-bugs -- people interested in receiving bug reports
* foo-drivers -- people who may approve and target blueprints
The openstack-admins team should be a member of each of those teams.


@@ -1,153 +0,0 @@
HOWTO: Third Party Testing
==========================
Overview
--------
Gerrit has an event stream which can be subscribed to; using this, it is possible
to test commits against testing systems beyond those supplied by OpenStack's
Jenkins setup. It is also possible for these systems to feed information back
into Gerrit, and they can also leave non-gating votes on Gerrit review requests.
An example of one such system is `Smokestack <http://smokestack.openstack.org/>`_.
Smokestack reads the Gerrit event stream and runs its own tests on the commits.
If one of the tests fails it will publish information and links to the failure
on the review in Gerrit.
Reading the Event Stream
------------------------
It is possible to use ssh to connect to ``review.openstack.org`` on port 29418
with your ssh key if you are signed up as an OpenStack developer on Launchpad.
This will give you a real-time JSON stream of events happening inside Gerrit.
For example:
.. code-block:: bash
$ ssh -p 29418 review.example.com gerrit stream-events
This will give a stream with output like the following (line breaks and
indentation added in this document for readability; the raw JSON will be all
on one line per event):
.. code-block:: javascript
{"type":"comment-added","change":
{"project":"openstack/keystone","branch":"stable/essex","topic":"bug/969088","id":"I18ae38af62b4c2b2423e20e436611fc30f844ae1","number":"7385","subject":"Make import_nova_auth only create roles which don\u0027t already exist","owner":
{"name":"Chuck Short","email":"chuck.short@canonical.com","username":"zulcss"},"url":"https://review.openstack.org/7385"},
"patchSet":
{"number":"1","revision":"aff45d69a73033241531f5e3542a8d1782ddd859","ref":"refs/changes/85/7385/1","uploader":
{"name":"Chuck Short","email":"chuck.short@canonical.com","username":"zulcss"},
"createdOn":1337002189},
"author":
{"name":"Mark McLoughlin","email":"markmc@redhat.com","username":"markmc"},
"approvals":
[{"type":"CRVW","description":"Code Review","value":"2"},{"type":"APRV","description":"Approved","value":"0"}],
"comment":"Hmm, I actually thought this was in Essex already.\n\nIt\u0027s a pretty annoying little issue for folks migrating for nova auth. Fix is small and pretty safe. Good choice for backporting"}
For most purposes you will want to trigger on ``patchset-created``, which is
emitted when a new patchset has been uploaded.
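A minimal consumer that watches for those events might look like this (a
sketch only; reconnection and error handling are omitted):

.. code-block:: python

   import json
   import subprocess

   cmd = ['ssh', '-p', '29418', 'review.openstack.org',
          'gerrit', 'stream-events']
   stream = subprocess.Popen(cmd, stdout=subprocess.PIPE)
   for line in iter(stream.stdout.readline, ''):
       event = json.loads(line)
       if event.get('type') == 'patchset-created':
           # Kick off your own test run against this ref.
           print('%s %s' % (event['change']['project'],
                            event['patchSet']['ref']))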
Further documentation on how to use the events stream can be found in `Gerrit's stream event documentation page <http://gerrit-documentation.googlecode.com/svn/Documentation/2.3/cmd-stream-events.html>`_.
Posting Result To Gerrit
------------------------
External testing systems can give non-gating votes to Gerrit by means of a -1/+1
verify vote. OpenStack Jenkins has extra permissions to give a +2/-2 verify
vote, which is gating. Comments should also be provided to explain what kind of
test failed. We also ask that the comments contain public links to the
failure so that the developer can see what caused it.
An example of how to post this is as follows:
.. code-block:: bash
$ ssh -p 29418 review.example.com gerrit review -m '"Test failed on MegaTestSystem <http://megatestsystem.org/tests/1234>"' --verified=-1 c0ff33
In this example ``c0ff33`` is the commit ID for the review. You can set the
verified to either `-1` or `+1` depending on whether or not it passed the tests.
Further documentation on the `review` command in Gerrit can be found in the `Gerrit review documentation page <http://gerrit-documentation.googlecode.com/svn/Documentation/2.3/cmd-review.html>`_.
We do suggest cautious testing of these systems and have a development Gerrit
setup to test on if required. In SmokeStack's case all failures are manually
reviewed before getting pushed to OpenStack; whilst this may not scale, it is
advisable during initial testing of the setup.
.. _request-account-label:
Requesting a Service Account
----------------------------
To request a service account for your system you first need to create a new
account in Launchpad. This account needs to be joined to the
`OpenStack Team <https://launchpad.net/~openstack>`_ or one of the related teams
so that Gerrit can pick it up. You can then contact the
OpenStack CI Admins via `email <mailto:openstack-ci-admins@lists.launchpad.net>`_
or the #openstack-infra IRC channel. We will set things up on Gerrit to
receive your system's votes.
Feel free to contact the CI team to arrange setting up a dedicated user so your
system can post reviews using a system name rather than your user name.
The Jenkins Gerrit Trigger Plugin Way
-------------------------------------
There is a Gerrit Trigger plugin for Jenkins which automates all of the
processes described in this document. If your testing system is Jenkins-based,
you can use it to simplify things. You will still need an account to do
this as described in the :ref:`request-account-label` section above.
The OpenStack version of the Gerrit Trigger plugin for Jenkins can be found on
`the Jenkins packaging job <https://jenkins.openstack.org/view/All/job/gerrit-trigger-plugin-package/lastSuccessfulBuild/artifact/gerrithudsontrigger/target/gerrit-trigger.hpi>`_. You can install it using the Advanced tab in the
Jenkins Plugin Manager.
Once installed, Jenkins will have a new `Gerrit Trigger` option in the `Manage
Jenkins` menu. This should be given the following options::
Hostname: review.openstack.org
Frontend URL: https://review.openstack.org/
SSH Port: 29418
Username: (the Launchpad user)
SSH Key File: (path to the user SSH key)
Verify
------
Started: 0
Successful: 1
Failed: -1
Unstable: 0
Code Review
-----------
Started: 0
Successful: 0
Failed: 0
Unstable: 0
(under the Advanced button):
Started: (blank)
Successful: gerrit approve <CHANGE>,<PATCHSET> --message 'Build Successful <BUILDS_STATS>' --verified <VERIFIED> --code-review <CODE_REVIEW> --submit
Failed: gerrit approve <CHANGE>,<PATCHSET> --message 'Build Failed <BUILDS_STATS>' --verified <VERIFIED> --code-review <CODE_REVIEW>
Unstable: gerrit approve <CHANGE>,<PATCHSET> --message 'Build Unstable <BUILDS_STATS>' --verified <VERIFIED> --code-review <CODE_REVIEW>
Note that it is useful to include something in these messages indicating which
testing system supplied them.
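For example, the success command could be amended along these lines
(``MegaTestSystem`` being the hypothetical system name used earlier)::

    Successful: gerrit approve <CHANGE>,<PATCHSET> --message 'MegaTestSystem: Build Successful <BUILDS_STATS>' --verified <VERIFIED> --code-review <CODE_REVIEW> --submit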
When creating jobs in Jenkins you will have the option to add triggers. You
should configure them as follows::
Trigger on Patchset Uploaded: ticked
(the rest unticked)
Type: Plain
Pattern: openstack/project-name (where project-name is the name of the project)
Branches:
Type: Path
Pattern: **
The job will now trigger automatically when a new patchset is uploaded and
will report its results back to Gerrit.
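Before relying on the trigger, it is worth confirming that the configured
account and key can reach Gerrit at all. A quick check is to run a harmless
command over the same SSH interface (the username and key path below are
placeholders):

.. code-block:: bash

   $ ssh -p 29418 -i /path/to/ssh/key username@review.openstack.org gerrit version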
View File
@ -1,13 +0,0 @@
#!/bin/bash
lsbdistcodename=`lsb_release -c -s`
puppet_deb=puppetlabs-release-${lsbdistcodename}.deb
wget http://apt.puppetlabs.com/$puppet_deb -O $puppet_deb
dpkg -i $puppet_deb
apt-get update
apt-get install -y puppet git rubygems
git clone https://github.com/openstack/openstack-ci-puppet
bash openstack-ci-puppet/install_modules.sh
puppet apply --modulepath=`pwd`/openstack-ci-puppet/modules:/etc/puppet/modules -e 'node default {class { "openstack_project::bare_slave": install_users => false }}'
View File
@ -1,48 +0,0 @@
#!/bin/bash
MODULE_PATH=/etc/puppet/modules
function clone_git() {
REMOTE_URL=$1
REPO=$2
REV=$3
if [ -d $MODULE_PATH/$REPO -a ! -d $MODULE_PATH/$REPO/.git ] ; then
rm -rf $MODULE_PATH/$REPO
fi
if [ ! -d $MODULE_PATH/$REPO ] ; then
git clone $REMOTE_URL $MODULE_PATH/$REPO
fi
OLDDIR=`pwd`
cd $MODULE_PATH/$REPO
if ! git rev-parse HEAD | grep "^$REV" >/dev/null; then
git fetch $REMOTE_URL
git reset --hard $REV >/dev/null
fi
cd $OLDDIR
}
if ! puppet help module >/dev/null 2>&1 ; then
apt-get install -y -o Dpkg::Options::="--force-confold" puppet facter
fi
MODULES="
openstackci-dashboard
openstackci-vcsrepo
puppetlabs-apache
puppetlabs-apt
puppetlabs-mysql
"
MODULE_LIST=`puppet module list`
# Transition away from old things
if [ -d /etc/puppet/modules/vcsrepo/.git ] ; then
rm -rf /etc/puppet/modules/vcsrepo
fi
for MOD in $MODULES ; do
if ! echo $MODULE_LIST | grep $MOD >/dev/null 2>&1 ; then
# This will get run in cron, so silence non-error output
puppet module install --force $MOD >/dev/null
fi
done
View File
@ -1,4 +1,4 @@
class jenkins_slave::cgroups {
class jenkins::cgroups {
package { 'cgroup-bin':
ensure => present
@ -9,7 +9,7 @@ class jenkins_slave::cgroups {
replace => true,
owner => root,
mode => 0644,
content => template('jenkins_slave/cgconfig.erb')
content => template('jenkins/cgconfig.erb')
}
file { '/etc/cgrules.conf':
@ -17,7 +17,7 @@ class jenkins_slave::cgroups {
replace => true,
owner => root,
mode => 0644,
source => 'puppet:///modules/jenkins_slave/cgroups/cgrules.conf'
source => 'puppet:///modules/jenkins/cgroups/cgrules.conf'
}
service { 'cgconfig':
View File
@ -1,4 +1,4 @@
define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
define jenkins::jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
group { 'jenkins':
ensure => 'present'
@ -44,7 +44,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
group => 'jenkins',
mode => 640,
ensure => 'present',
source => "puppet:///modules/jenkins_slave/pip.conf",
source => "puppet:///modules/jenkins/pip.conf",
require => File['jenkinspipdir'],
}
@ -60,7 +60,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
group => 'jenkins',
mode => 640,
ensure => 'present',
source => "puppet:///modules/jenkins_slave/gitconfig",
source => "puppet:///modules/jenkins/gitconfig",
require => File['jenkinshome'],
}
@ -113,39 +113,6 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
}
file { 'jenkinsbazaardir':
name => '/home/jenkins/.bazaar',
owner => 'jenkins',
group => 'jenkins',
mode => 755,
ensure => 'directory',
require => File['jenkinshome'],
}
file { 'jenkinsbazaarwhoami':
name => '/home/jenkins/.bazaar/bazaar.conf',
owner => 'jenkins',
group => 'jenkins',
mode => 640,
ensure => 'present',
require => File['jenkinsbazaardir'],
source => [
"puppet:///modules/jenkins_slave/bazaar.conf",
],
}
file { 'jenkinsbazaarauth':
name => '/home/jenkins/.bazaar/authentication.conf',
owner => 'jenkins',
group => 'jenkins',
mode => 640,
ensure => 'present',
require => File['jenkinsbazaardir'],
source => [
"puppet:///modules/jenkins_slave/authentication.conf",
],
}
file { 'jenkinssshconfig':
name => '/home/jenkins/.ssh/config',
owner => 'jenkins',
@ -154,7 +121,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
require => File['jenkinssshdir'],
source => [
"puppet:///modules/jenkins_slave/ssh_config",
"puppet:///modules/jenkins/ssh_config",
],
}
@ -166,7 +133,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
require => File['jenkinssshdir'],
source => [
"puppet:///modules/jenkins_slave/slave_private_key",
"puppet:///modules/jenkins/slave_private_key",
],
}
@ -187,7 +154,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
require => File['jenkinsgpgdir'],
source => [
"puppet:///modules/jenkins_slave/pubring.gpg",
"puppet:///modules/jenkins/pubring.gpg",
],
}
@ -199,7 +166,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
require => File['jenkinsgpgdir'],
source => [
"puppet:///modules/jenkins_slave/slave_gpg_key",
"puppet:///modules/jenkins/slave_gpg_key",
],
}
@ -229,7 +196,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
require => File['jenkinsglanceconfigdir'],
source => [
"puppet:///modules/jenkins_slave/glance_s3.conf",
"puppet:///modules/jenkins/glance_s3.conf",
],
}
@ -241,7 +208,7 @@ define jenkinsuser($ensure = present, $sudo = false, $ssh_key) {
ensure => 'present',
require => File['jenkinsglanceconfigdir'],
source => [
"puppet:///modules/jenkins_slave/glance_swift.conf",
"puppet:///modules/jenkins/glance_swift.conf",
],
}
View File
@ -1,4 +1,4 @@
class jenkins_job_builder (
class jenkins::job_builder (
$url,
$username,
$password,
@ -50,7 +50,7 @@ class jenkins_job_builder (
owner => 'jenkins',
mode => 400,
ensure => 'present',
content => template('jenkins_job_builder/jenkins_jobs.ini.erb'),
content => template('jenkins/jenkins_jobs.ini.erb'),
require => File["/etc/jenkins_jobs"],
}
View File
@ -1,4 +1,4 @@
class jenkins_master($vhost_name=$fqdn,
class jenkins::master($vhost_name=$fqdn,
$serveradmin="webmaster@$fqdn",
$logo,
$ssl_cert_file='',
@ -29,7 +29,7 @@ class jenkins_master($vhost_name=$fqdn,
port => 443,
docroot => 'MEANINGLESS ARGUMENT',
priority => '50',
template => 'jenkins_master/jenkins.vhost.erb',
template => 'jenkins/jenkins.vhost.erb',
ssl => true,
}
a2mod { 'rewrite':
@ -66,7 +66,7 @@ class jenkins_master($vhost_name=$fqdn,
group => 'root',
mode => 444,
ensure => 'present',
source => "puppet:///modules/jenkins_master/versions.conf",
source => "puppet:///modules/jenkins/versions.conf",
replace => 'true',
notify => Service["versions"]
}
@ -104,25 +104,25 @@ class jenkins_master($vhost_name=$fqdn,
file { "/var/lib/jenkins/plugins/simple-theme-plugin/openstack.css":
ensure => present,
source => "puppet:///modules/jenkins_master/openstack.css",
source => "puppet:///modules/jenkins/openstack.css",
require => File["/var/lib/jenkins/plugins/simple-theme-plugin"]
}
file { "/var/lib/jenkins/plugins/simple-theme-plugin/openstack.js":
ensure => present,
content => template("jenkins_master/openstack.js.erb"),
content => template("jenkins/openstack.js.erb"),
require => File["/var/lib/jenkins/plugins/simple-theme-plugin"]
}
file { "/var/lib/jenkins/plugins/simple-theme-plugin/openstack-page-bkg.jpg":
ensure => present,
source => "puppet:///modules/jenkins_master/openstack-page-bkg.jpg",
source => "puppet:///modules/jenkins/openstack-page-bkg.jpg",
require => File["/var/lib/jenkins/plugins/simple-theme-plugin"]
}
file { "/var/lib/jenkins/plugins/simple-theme-plugin/title.png":
ensure => present,
source => "puppet:///modules/jenkins_master/${logo}",
source => "puppet:///modules/jenkins/${logo}",
require => File["/var/lib/jenkins/plugins/simple-theme-plugin"]
}
@ -141,7 +141,7 @@ class jenkins_master($vhost_name=$fqdn,
recurse => true,
require => File['/usr/local/jenkins'],
source => [
"puppet:///modules/jenkins_slave/slave_scripts",
"puppet:///modules/jenkins/slave_scripts",
],
}
}
View File
@ -1,140 +0,0 @@
#
# Default: should at least behave like an openstack server
#
node default {
include openstack_project::puppet_cron
include openstack_project::server
}
#
# Long lived servers:
#
node "review.openstack.org" {
class { 'openstack_project::review':
github_oauth_token => hiera('gerrit_github_token'),
mysql_password => hiera('gerrit_mysql_password'),
mysql_root_password => hiera('gerrit_mysql_root_password'),
email_private_key => hiera('gerrit_email_private_key'),
gerritbot_password => hiera('gerrit_gerritbot_password'),
}
}
node "gerrit-dev.openstack.org", "review-dev.openstack.org" {
class { 'openstack_project::review_dev':
github_oauth_token => hiera('gerrit_dev_github_token'),
mysql_password => hiera('gerrit_dev_mysql_password'),
mysql_root_password => hiera('gerrit_dev_mysql_root_password'),
email_private_key => hiera('gerrit_dev_email_private_key')
}
}
node "jenkins.openstack.org" {
class { 'openstack_project::jenkins':
jenkins_jobs_password => hiera('jenkins_jobs_password'),
}
class { "openstack_project::zuul":
jenkins_server => "https://$fqdn",
jenkins_user => 'hudson-openstack',
jenkins_apikey => hiera('zuul_jenkins_apikey'),
gerrit_server => 'review.openstack.org',
gerrit_user => 'jenkins',
}
}
node "jenkins-dev.openstack.org" {
include openstack_project::jenkins_dev
}
node "community.openstack.org" {
include openstack_project::community
}
node "ci-puppetmaster.openstack.org" {
include openstack_project::puppetmaster
}
node "lists.openstack.org" {
class { 'openstack_project::lists':
listadmins => hiera('listadmins'),
}
}
node "paste.openstack.org" {
include openstack_project::paste
}
node "planet.openstack.org" {
include openstack_project::planet
}
node "eavesdrop.openstack.org" {
class { 'openstack_project::eavesdrop':
nickpass => hiera('openstack_meetbot_password'),
}
}
node "pypi.openstack.org" {
include openstack_project::pypi
}
node 'etherpad.openstack.org' {
class { 'openstack_project::etherpad':
etherpad_crt => hiera('etherpad_crt'),
etherpad_key => hiera('etherpad_key'),
database_password => hiera('etherpad_db_password'),
}
}
node 'wiki.openstack.org' {
include openstack_project::wiki
}
node 'puppet-dashboard.openstack.org' {
class { 'openstack_project::dashboard':
password => hiera('dashboard_password'),
mysql_password => hiera('dashboard_mysql_password'),
}
}
# A machine to serve static content.
node 'static.openstack.org' {
include openstack_project::static
}
# A bare machine, but with a jenkins user
node /^.*\.template\.openstack\.org$/ {
include openstack_project::slave_template
}
# A backup machine. Don't run cron or puppet agent on it.
node /^ci-backup-.*\.openstack\.org$/ {
include openstack_project::backup_server
}
#
# Jenkins slaves:
#
# Rollout cgroups to precise slaves.
node /^precise.*\.slave\.openstack\.org$/ {
include openstack_project::puppet_cron
include openstack_project::slave
include ulimit
ulimit::conf { 'limit_jenkins_procs':
limit_domain => 'jenkins',
limit_type => 'hard',
limit_item => 'nproc',
limit_value => '256'
}
include jenkins_slave::cgroups
}
node /^.*\.slave\.openstack\.org$/ {
include openstack_project::puppet_cron
include openstack_project::slave
}
node /^.*\.jclouds\.openstack\.org$/ {
include openstack_project::bare_slave
}
View File
@ -1,9 +1,9 @@
class jenkins_slave($ssh_key, $sudo = false, $bare = false, $user = true) {
class jenkins::slave($ssh_key, $sudo = false, $bare = false, $user = true) {
include pip
if ($user == true) {
jenkinsuser { "jenkins":
jenkins::jenkinsuser { "jenkins":
ensure => present,
sudo => $sudo,
ssh_key => "${ssh_key}"
@ -73,7 +73,7 @@ class jenkins_slave($ssh_key, $sudo = false, $bare = false, $user = true) {
mode => 644,
ensure => 'present',
source => [
"puppet:///modules/jenkins_slave/rubygems.sh",
"puppet:///modules/jenkins/rubygems.sh",
],
}
@ -143,7 +143,7 @@ class jenkins_slave($ssh_key, $sudo = false, $bare = false, $user = true) {
recurse => true,
require => File['/usr/local/jenkins'],
source => [
"puppet:///modules/jenkins_slave/slave_scripts",
"puppet:///modules/jenkins/slave_scripts",
],
}
@ -151,7 +151,7 @@ class jenkins_slave($ssh_key, $sudo = false, $bare = false, $user = true) {
# https://lists.launchpad.net/openstack/msg13381.html
file { '/etc/sysctl.d/10-ptrace.conf':
ensure => present,
source => "puppet:///modules/jenkins_slave/10-ptrace.conf",
source => "puppet:///modules/jenkins/10-ptrace.conf",
owner => 'root',
group => 'root',
mode => 444,
View File
@ -1,23 +0,0 @@
class bup {
package { "bup":
ensure => present
}
file { "/etc/bup-excludes":
ensure => present,
content => "/proc/*
/sys/*
/dev/*
/tmp/*
/floppy/*
/cdrom/*
/var/spool/squid/*
/var/spool/exim/*
/media/*
/mnt/*
/var/agentx/*
/run/*
"
}
}
View File
@ -1,8 +0,0 @@
define bup::site($backup_user, $backup_server) {
cron { "bup-$name":
user => root,
hour => "5",
minute => "37",
command => "tar -X /etc/bup-excludes -cPf - / | bup split -r $backup_user@$backup_server: -n root -q",
}
}
View File
@ -1,4 +0,0 @@
# This file is managed by puppet
# Use localhost in the node name so that we don't need to
# touch /etc/hosts or use dns
NODENAME=rabbit@localhost
View File
@ -1,41 +0,0 @@
# A machine ready to run devstack
class devstack_host {
package { "linux-headers-virtual":
ensure => present,
}
package { "mysql-server":
ensure => present,
}
package { "rabbitmq-server":
ensure => present,
require => File['rabbitmq-env.conf'],
}
file { "/etc/rabbitmq":
ensure => "directory",
}
file { 'rabbitmq-env.conf':
name => '/etc/rabbitmq/rabbitmq-env.conf',
owner => 'root',
group => 'root',
mode => 444,
ensure => 'present',
source => [
"puppet:///modules/devstack_host/rabbitmq-env.conf",
],
require => File['/etc/rabbitmq'],
}
exec { "Set MySQL server root password":
subscribe => [ Package["mysql-server"]],
refreshonly => true,
unless => "mysqladmin -uroot -psecret status",
path => "/bin:/usr/bin",
command => "mysqladmin -uroot password secret",
}
}
View File
@ -1,7 +0,0 @@
function customStart()
{
//define your javascript here
//jquery is available - except index.js
//you can load extra scripts with $.getScript http://api.jquery.com/jQuery.getScript/
chat.stickToScreen(true);
}
View File
@ -1,58 +0,0 @@
class etherpad_lite::apache (
$vhost_name = $fqdn,
$etherpad_crt,
$etherpad_key
) {
include remove_nginx
apache::vhost { $vhost_name:
port => 443,
docroot => 'MEANINGLESS ARGUMENT',
priority => '50',
template => 'etherpad_lite/etherpadlite.vhost.erb',
require => File["/etc/ssl/certs/${vhost_name}.pem",
"/etc/ssl/private/${vhost_name}.key"],
ssl => true,
}
a2mod { 'rewrite':
ensure => present
}
a2mod { 'proxy':
ensure => present
}
a2mod { 'proxy_http':
ensure => present
}
file { '/etc/ssl/certs':
ensure => directory,
owner => 'root',
mode => 0700,
}
file { '/etc/ssl/private':
ensure => directory,
owner => 'root',
mode => 0700,
}
file { "/etc/ssl/certs/${vhost_name}.pem":
ensure => present,
replace => true,
owner => 'root',
mode => 0600,
content => template('etherpad_lite/eplite.crt.erb'),
require => File['/etc/ssl/certs'],
}
file { "/etc/ssl/private/${vhost_name}.key":
ensure => present,
replace => true,
owner => 'root',
mode => 0600,
content => template('etherpad_lite/eplite.key.erb'),
require => File['/etc/ssl/private'],
}
}
View File
@ -1,26 +0,0 @@
class etherpad_lite::backup (
$minute = '0',
$hour = '0',
$day = '*',
$dest = "${etherpad_lite::base_log_dir}/${etherpad_lite::ep_user}/db.sql.gz",
$rotation = 'daily',
$num_backups = '30'
) {
cron { eplitedbbackup:
ensure => present,
command => "/usr/bin/mysqldump --defaults-file=/etc/mysql/debian.cnf --opt etherpad-lite | gzip -9 > ${dest}",
minute => $minute,
hour => $hour,
weekday => $day,
require => Package['mysql-server']
}
include logrotate
logrotate::file { 'eplitedb':
log => $dest,
options => ['nocompress', "rotate ${num_backups}", $rotation],
require => Cron['eplitedbbackup']
}
}
View File
@ -1,142 +0,0 @@
# define to build from source using ./configure && make && make install.
define buildsource(
$dir = $title,
$user = 'root',
$creates = '/nonexistant/file'
) {
exec { "./configure in ${dir}":
command => './configure',
path => "/usr/bin:/bin:/usr/local/bin:${dir}",
user => $user,
cwd => $dir,
creates => $creates
} ->
exec { "make in ${dir}":
command => 'make',
path => '/usr/bin:/bin',
user => $user,
cwd => $dir,
creates => $creates
} ->
exec { "make install in ${dir}":
command => 'make install',
path => '/usr/bin:/bin',
user => $user,
cwd => $dir,
creates => $creates
}
}
# Class to install etherpad lite. Puppet acts a lot like a package manager
# through this class.
#
# To use etherpad lite you will want the following includes:
# include etherpad_lite
# include etherpad_lite::mysql # necessary to use mysql as the backend
# include etherpad_lite::site # configures etherpad lite instance
# include etherpad_lite::apache # will add reverse proxy on localhost
# The defaults for all the classes should just work (tm)
#
#
class etherpad_lite (
$ep_user = 'eplite',
$base_log_dir = '/var/log',
$base_install_dir = '/opt/etherpad-lite'
) {
user { $ep_user:
shell => '/sbin/nologin',
home => "${base_log_dir}/${ep_user}",
system => true,
gid => $ep_user,
require => Group[$ep_user]
}
group { $ep_user:
ensure => present
}
# Below is what happens when you treat puppet as a package manager.
# This is probably bad, but it works and you don't need to roll .debs.
file { "${base_install_dir}":
ensure => directory,
group => $ep_user,
mode => 0664,
}
vcsrepo { "${base_install_dir}/nodejs":
ensure => present,
provider => git,
source => 'https://github.com/joyent/node.git',
revision => 'v0.6.16',
require => Package['git']
}
package { ['gzip',
'curl',
'python',
'libssl-dev',
'pkg-config',
'abiword',
'build-essential']:
ensure => present
}
package { ['nodejs', 'npm']:
ensure => purged
}
buildsource { "${base_install_dir}/nodejs":
creates => '/usr/local/bin/node',
require => [Package['gzip'],
Package['curl'],
Package['python'],
Package['libssl-dev'],
Package['pkg-config'],
Package['build-essential'],
Vcsrepo["${base_install_dir}/nodejs"]]
}
vcsrepo { "${base_install_dir}/etherpad-lite":
ensure => present,
provider => git,
source => "https://github.com/Pita/etherpad-lite.git",
owner => $ep_user,
require => Package['git'],
}
exec { 'install_etherpad_dependencies':
command => './bin/installDeps.sh',
path => "/usr/bin:/bin:/usr/local/bin:${base_install_dir}/etherpad-lite",
user => $ep_user,
cwd => "${base_install_dir}/etherpad-lite",
environment => "HOME=${base_log_dir}/${ep_user}",
require => [Vcsrepo["${base_install_dir}/etherpad-lite"],
Buildsource["${base_install_dir}/nodejs"]],
before => File["${base_install_dir}/etherpad-lite/settings.json"],
creates => "${base_install_dir}/etherpad-lite/node_modules"
}
file { '/etc/init/etherpad-lite.conf':
ensure => 'present',
content => template('etherpad_lite/upstart.erb'),
replace => 'true',
owner => 'root',
}
file { '/etc/init.d/etherpad-lite':
ensure => link,
target => '/lib/init/upstart-job'
}
file { "${base_log_dir}/${ep_user}":
ensure => directory,
owner => $ep_user,
}
# end package management ugliness
}
View File
@ -1,64 +0,0 @@
class etherpad_lite::mysql (
$dbType = 'mysql',
$database_user = 'eplite',
$database_name = 'etherpad-lite',
$database_password
) {
include etherpad_lite
package { 'mysql-server':
ensure => present
}
package { 'mysql-client':
ensure => present
}
service { "mysql":
enable => true,
ensure => running,
hasrestart => true,
require => [Package['mysql-server'],
Package['mysql-client']]
}
file { "${etherpad_lite::base_install_dir}/etherpad-lite/create_database.sh":
ensure => 'present',
content => template('etherpad_lite/create_database.sh.erb'),
replace => true,
owner => $etherpad_lite::ep_user,
group => $etherpad_lite::ep_user,
mode => 0755,
require => Class['etherpad_lite']
}
file { "${etherpad_lite::base_install_dir}/etherpad-lite/create_user.sh":
ensure => 'present',
content => template('etherpad_lite/create_user.sh.erb'),
replace => true,
owner => $etherpad_lite::ep_user,
group => $etherpad_lite::ep_user,
mode => 0755,
require => Class['etherpad_lite']
}
exec { "create-etherpad-lite-db":
unless => "mysql --defaults-file=/etc/mysql/debian.cnf ${database_name}",
path => ['/bin', '/usr/bin'],
command => "${etherpad_lite::base_install_dir}/etherpad-lite/create_database.sh",
require => [Service['mysql'],
File["${etherpad_lite::base_install_dir}/etherpad-lite/settings.json"],
File["${etherpad_lite::base_install_dir}/etherpad-lite/create_database.sh"]]
} ->
exec { "grant-etherpad-lite-db":
unless => "mysql -u${database_user} -p${database_password} ${database_name}",
path => ['/bin', '/usr/bin'],
command => "${etherpad_lite::base_install_dir}/etherpad-lite/create_user.sh",
require => [Service['mysql'],
File["${etherpad_lite::base_install_dir}/etherpad-lite/settings.json"],
File["${etherpad_lite::base_install_dir}/etherpad-lite/create_user.sh"]]
}
}
View File
@ -1,58 +0,0 @@
class etherpad_lite::site (
$dbType = 'mysql',
$database_user = 'eplite',
$database_name = 'etherpad-lite',
$database_password,
) {
include etherpad_lite
if $dbType == 'mysql' {
service { 'etherpad-lite':
enable => true,
ensure => running,
subscribe => File["${etherpad_lite::base_install_dir}/etherpad-lite/settings.json"],
require => Class['etherpad_lite::mysql'],
}
}
else {
service { 'etherpad-lite':
enable => true,
ensure => running,
subscribe => File["${etherpad_lite::base_install_dir}/etherpad-lite/settings.json"],
}
}
file { "${etherpad_lite::base_install_dir}/etherpad-lite/settings.json":
ensure => 'present',
content => template('etherpad_lite/etherpad-lite_settings.json.erb'),
replace => true,
owner => $etherpad_lite::ep_user,
group => $etherpad_lite::ep_user,
mode => 0600,
require => Class['etherpad_lite']
}
file { "${etherpad_lite::base_install_dir}/etherpad-lite/src/static/custom/pad.js":
ensure => 'present',
source => 'puppet:///modules/etherpad_lite/pad.js',
owner => $etherpad_lite::ep_user,
group => $etherpad_lite::ep_user,
mode => 0644,
require => Class['etherpad_lite']
}
include logrotate
logrotate::file { 'epliteerror':
log => "${etherpad_lite::base_log_dir}/${etherpad_lite::ep_user}/error.log",
options => ['compress', 'copytruncate', 'missingok', 'rotate 7', 'daily', 'notifempty'],
require => Service['etherpad-lite']
}
logrotate::file { 'epliteaccess':
log => "${etherpad_lite::base_log_dir}/${etherpad_lite::ep_user}/access.log",
options => ['compress', 'copytruncate', 'missingok', 'rotate 7', 'daily', 'notifempty'],
require => Service['etherpad-lite']
}
}
View File
@ -1,3 +0,0 @@
#!/bin/bash
mysql --defaults-file=/etc/mysql/debian.cnf -e 'create database `<%= database_name %>` CHARACTER SET utf8 COLLATE utf8_bin'
View File
@ -1,3 +0,0 @@
#!/bin/bash
mysql --defaults-file=/etc/mysql/debian.cnf -e 'grant all on `<%= database_name %>`.* to "<%= database_user %>"@"localhost" identified by "<%= database_password %>";'
View File
@ -1 +0,0 @@
<%= etherpad_crt %>
View File
@ -1 +0,0 @@
<%= etherpad_key %>
View File
@ -1,47 +0,0 @@
/*
This file must be valid JSON. But comments are allowed
Please edit settings.json, not settings.json.template
*/
{
//Ip and port which etherpad should bind at
"ip": "127.0.0.1",
"port" : 9001,
//The Type of the database. You can choose between dirty, sqlite and mysql
//You should use mysql or sqlite for anything else than testing or development
"dbType" : "<%= dbType %>",
//the database specific settings
"dbSettings" : {
"user" : "<%= database_user %>",
"host" : "localhost",
"password": "<%= database_password %>",
"database": "<%= database_name %>"
},
//the default text of a pad
"defaultPadText" : "Welcome to Etherpad Lite!\n\nThis pad text is synchronized as you type, so that everyone viewing this page sees the same text. This allows you to collaborate seamlessly on documents!\n\nEtherpad Lite on Github: http:\/\/j.mp/ep-lite\n",
/* Users must have a session to access pads. This effectively allows only group pads to be accessed. */
"requireSession" : false,
/* Users may edit pads but not create new ones. Pad creation is only via the API. This applies both to group pads and regular pads. */
"editOnly" : false,
/* if true, all css & js will be minified before sending to the client. This will improve the loading performance massivly,
but makes it impossible to debug the javascript/css */
"minify" : true,
/* How long may clients use served javascript code? Without versioning this
is may cause problems during deployment. */
"maxAge" : 21600000, // 6 hours
/* This is the path to the Abiword executable. Setting it to null, disables abiword.
Abiword is needed to enable the import/export of pads*/
"abiword" : "/usr/bin/abiword",
/* This setting is used if you need http basic auth */
// "httpAuth" : "user:pass",
/* The log level we are using, can be: DEBUG, INFO, WARN, ERROR */
"loglevel": "INFO"
}
View File
@ -1,44 +0,0 @@
<VirtualHost <%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>:80>
ServerAdmin <%= scope.lookupvar("etherpad_lite::apache::serveradmin") %>
ErrorLog ${APACHE_LOG_DIR}/<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>-error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>-access.log combined
Redirect / https://<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>/
</VirtualHost>
<IfModule mod_ssl.c>
<VirtualHost <%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>:443>
ServerName <%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>
ServerAdmin <%= scope.lookupvar("etherpad_lite::apache::serveradmin") %>
ErrorLog ${APACHE_LOG_DIR}/<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>-ssl-error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>-ssl-access.log combined
SSLEngine on
SSLCertificateFile /etc/ssl/certs/<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>.pem
SSLCertificateKeyFile /etc/ssl/private/<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>.key
BrowserMatch "MSIE [2-6]" \
nokeepalive ssl-unclean-shutdown \
downgrade-1.0 force-response-1.0
# MSIE 7 and newer should be able to use keepalive
BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
RewriteEngine on
RewriteCond %{HTTP_HOST} !<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>
RewriteRule ^.*$ https://<%= scope.lookupvar("etherpad_lite::apache::vhost_name") %>
RewriteRule ^/(.*)$ http://localhost:9001/$1 [P]
ProxyPassReverse / http://localhost:9001/
</VirtualHost>
</IfModule>
View File
@ -1,26 +0,0 @@
description "etherpad-lite"
start on started networking
stop on runlevel [!2345]
env EPHOME=<%= base_install_dir %>/etherpad-lite
env EPLOGS=<%= base_log_dir %>/<%= ep_user %>
env EPUSER=<%= ep_user %>
respawn
pre-start script
chdir $EPHOME
mkdir $EPLOGS ||true
chown $EPUSER:admin $EPLOGS ||true
chmod 0755 $EPLOGS ||true
chown -R $EPUSER:admin $EPHOME/var ||true
bin/installDeps.sh >> $EPLOGS/error.log || { stop; exit 1; }
end script
script
cd $EPHOME
exec su -s /bin/sh -c 'exec "$0" "$@"' $EPUSER -- /usr/local/bin/node node_modules/ep_etherpad-lite/node/server.js \
>> $EPLOGS/access.log \
2>> $EPLOGS/error.log
end script
View File
@ -1,36 +0,0 @@
class exim($sysadmin=[], $mailman_domains=[]) {
package { 'exim4-base':
ensure => present;
}
package { 'exim4-config':
ensure => present;
}
package { 'exim4-daemon-light':
ensure => present,
require => [Package[exim4-base], Package[exim4-config]],
}
service { 'exim4':
ensure => running,
hasrestart => true,
subscribe => File['/etc/exim4/exim4.conf'],
}
file { '/etc/exim4/exim4.conf':
owner => 'root',
group => 'root',
mode => 444,
ensure => 'present',
content => template("exim/exim4.conf.erb"),
replace => 'true',
}
file { '/etc/aliases':
owner => 'root',
group => 'root',
mode => 444,
ensure => 'present',
content => template("exim/aliases.erb"),
replace => 'true',
}
}
View File
@ -1,19 +0,0 @@
# /etc/aliases
mailer-daemon: postmaster
postmaster: root
nobody: root
hostmaster: root
usenet: root
news: root
webmaster: root
www: root
ftp: root
abuse: root
noc: root
security: root
gerrit2: root
jenkins: root
<% if sysadmin.length > 0 -%>
root: <%= sysadmin.join(",") %>
<% end -%>
View File
@ -1,825 +0,0 @@
# $Cambridge: exim/exim-src/src/configure.default,v 1.14 2009/10/16 07:46:13 tom Exp $
######################################################################
# Runtime configuration file for Exim #
######################################################################
# This is a default configuration file which will operate correctly in
# uncomplicated installations. Please see the manual for a complete list
# of all the runtime configuration options that can be included in a
# configuration file. There are many more than are mentioned here. The
# manual is in the file doc/spec.txt in the Exim distribution as a plain
# ASCII file. Other formats (PostScript, Texinfo, HTML, PDF) are available
# from the Exim ftp sites. The manual is also online at the Exim web sites.
# This file is divided into several parts, all but the first of which are
# headed by a line starting with the word "begin". Only those parts that
# are required need to be present. Blank lines, and lines starting with #
# are ignored.
########### IMPORTANT ########## IMPORTANT ########### IMPORTANT ###########
# #
# Whenever you change Exim's configuration file, you *must* remember to #
# HUP the Exim daemon, because it will not pick up the new configuration #
# until you do. However, any other Exim processes that are started, for #
# example, a process started by an MUA in order to send a message, will #
# see the new configuration as soon as it is in place. #
# #
# You do not need to HUP the daemon for changes in auxiliary files that #
# are referenced from this file. They are read every time they are used. #
# #
# It is usually a good idea to test a new configuration for syntactic #
# correctness before installing it (for example, by running the command #
# "exim -C /config/file.new -bV"). #
# #
########### IMPORTANT ########## IMPORTANT ########### IMPORTANT ###########
CONFDIR = /etc/exim4
######################################################################
# MAIN CONFIGURATION SETTINGS #
######################################################################
# Specify your host's canonical name here. This should normally be the fully
# qualified "official" name of your host. If this option is not set, the
# uname() function is called to obtain the name. In many cases this does
# the right thing and you need not set anything explicitly.
# primary_hostname =
# The next three settings create two lists of domains and one list of hosts.
# These lists are referred to later in this configuration using the syntax
# +local_domains, +relay_to_domains, and +relay_from_hosts, respectively. They
# are all colon-separated lists:
domainlist local_domains = @<% if mailman_domains.length > 0 -%>:<%= mailman_domains.join(":") %><% end -%>
domainlist relay_to_domains =
hostlist relay_from_hosts = 127.0.0.1
# Most straightforward access control requirements can be obtained by
# appropriate settings of the above options. In more complicated situations,
# you may need to modify the Access Control Lists (ACLs) which appear later in
# this file.
# The first setting specifies your local domains, for example:
#
# domainlist local_domains = my.first.domain : my.second.domain
#
# You can use "@" to mean "the name of the local host", as in the default
# setting above. This is the name that is specified by primary_hostname,
# as specified above (or defaulted). If you do not want to do any local
# deliveries, remove the "@" from the setting above. If you want to accept mail
# addressed to your host's literal IP address, for example, mail addressed to
# "user@[192.168.23.44]", you can add "@[]" as an item in the local domains
# list. You also need to uncomment "allow_domain_literals" below. This is not
# recommended for today's Internet.
# The second setting specifies domains for which your host is an incoming relay.
# If you are not doing any relaying, you should leave the list empty. However,
# if your host is an MX backup or gateway of some kind for some domains, you
# must set relay_to_domains to match those domains. For example:
#
# domainlist relay_to_domains = *.myco.com : my.friend.org
#
# This will allow any host to relay through your host to those domains.
# See the section of the manual entitled "Control of relaying" for more
# information.
# The third setting specifies hosts that can use your host as an outgoing relay
# to any other host on the Internet. Such a setting commonly refers to a
# complete local network as well as the localhost. For example:
#
# hostlist relay_from_hosts = 127.0.0.1 : 192.168.0.0/16
#
# The "/16" is a bit mask (CIDR notation), not a number of hosts. Note that you
# have to include 127.0.0.1 if you want to allow processes on your host to send
# SMTP mail by using the loopback address. A number of MUAs use this method of
# sending mail.
# All three of these lists may contain many different kinds of item, including
# wildcarded names, regular expressions, and file lookups. See the reference
# manual for details. The lists above are used in the access control lists for
# checking incoming messages. The names of these ACLs are defined here:
acl_smtp_rcpt = acl_check_rcpt
acl_smtp_data = acl_check_data
# You should not change those settings until you understand how ACLs work.
# If you are running a version of Exim that was compiled with the content-
# scanning extension, you can cause incoming messages to be automatically
# scanned for viruses. You have to modify the configuration in two places to
# set this up. The first of them is here, where you define the interface to
# your scanner. This example is typical for ClamAV; see the manual for details
# of what to set for other virus scanners. The second modification is in the
# acl_check_data access control list (see below).
# av_scanner = clamd:/tmp/clamd
# For spam scanning, there is a similar option that defines the interface to
# SpamAssassin. You do not need to set this if you are using the default, which
# is shown in this commented example. As for virus scanning, you must also
# modify the acl_check_data access control list to enable spam scanning.
# spamd_address = 127.0.0.1 783
# If Exim is compiled with support for TLS, you may want to enable the
# following options so that Exim allows clients to make encrypted
# connections. In the authenticators section below, there are template
# configurations for plaintext username/password authentication. This kind
# of authentication is only safe when used within a TLS connection, so the
# authenticators will only work if the following TLS settings are turned on
# as well.
# Allow any client to use TLS.
#tls_advertise_hosts = *
# Specify the location of the Exim server's TLS certificate and private key.
# The private key must not be encrypted (password protected). You can put
# the certificate and private key in the same file, in which case you only
# need the first setting, or in separate files, in which case you need both
# options.
# tls_certificate = /etc/ssl/exim.crt
# tls_privatekey = /etc/ssl/exim.pem
# In order to support roaming users who wish to send email from anywhere,
# you may want to make Exim listen on other ports as well as port 25, in
# case these users need to send email from a network that blocks port 25.
# The standard port for this purpose is port 587, the "message submission"
# port. See RFC 4409 for details. Microsoft MUAs cannot be configured to
# talk the message submission protocol correctly, so if you need to support
# them you should also allow TLS-on-connect on the traditional but
# non-standard port 465.
# daemon_smtp_ports = 25 : 465 : 587
# tls_on_connect_ports = 465
# Specify the domain you want to be added to all unqualified addresses
# here. An unqualified address is one that does not contain an "@" character
# followed by a domain. For example, "caesar@rome.example" is a fully qualified
# address, but the string "caesar" (i.e. just a login name) is an unqualified
# email address. Unqualified addresses are accepted only from local callers by
# default. See the recipient_unqualified_hosts option if you want to permit
# unqualified addresses from remote sources. If this option is not set, the
# primary_hostname value is used for qualification.
# qualify_domain =
# If you want unqualified recipient addresses to be qualified with a different
# domain to unqualified sender addresses, specify the recipient domain here.
# If this option is not set, the qualify_domain value is used.
# qualify_recipient =
# The following line must be uncommented if you want Exim to recognize
# addresses of the form "user@[10.11.12.13]" that is, with a "domain literal"
# (an IP address) instead of a named domain. The RFCs still require this form,
# but it makes little sense to permit mail to be sent to specific hosts by
# their IP address in the modern Internet. This ancient format has been used
# by those seeking to abuse hosts by using them for unwanted relaying. If you
# really do want to support domain literals, uncomment the following line, and
# see also the "domain_literal" router below.
# allow_domain_literals
# No deliveries will ever be run under the uids of users specified by
# never_users (a colon-separated list). An attempt to do so causes a panic
# error to be logged, and the delivery to be deferred. This is a paranoic
# safety catch. There is an even stronger safety catch in the form of the
# FIXED_NEVER_USERS setting in the configuration for building Exim. The list of
# users that it specifies is built into the binary, and cannot be changed. The
# option below just adds additional users to the list. The default for
# FIXED_NEVER_USERS is "root", but just to be absolutely sure, the default here
# is also "root".
# Note that the default setting means you cannot deliver mail addressed to root
# as if it were a normal user. This isn't usually a problem, as most sites have
# an alias for root that redirects such mail to a human administrator.
never_users = root
# The setting below causes Exim to do a reverse DNS lookup on all incoming
# IP calls, in order to get the true host name. If you feel this is too
# expensive, you can specify the networks for which a lookup is done, or
# remove the setting entirely.
host_lookup = *
# The settings below, which are actually the same as the defaults in the
# code, cause Exim to make RFC 1413 (ident) callbacks for all incoming SMTP
# calls. You can limit the hosts to which these calls are made, and/or change
# the timeout that is used. If you set the timeout to zero, all RFC 1413 calls
# are disabled. RFC 1413 calls are cheap and can provide useful information
# for tracing problem messages, but some hosts and firewalls have problems
# with them. This can result in a timeout instead of an immediate refused
# connection, leading to delays on starting up SMTP sessions. (The default was
# reduced from 30s to 5s for release 4.61.)
rfc1413_hosts = *
rfc1413_query_timeout = 0s
# By default, Exim expects all envelope addresses to be fully qualified, that
# is, they must contain both a local part and a domain. If you want to accept
# unqualified addresses (just a local part) from certain hosts, you can specify
# these hosts by setting one or both of
#
# sender_unqualified_hosts =
# recipient_unqualified_hosts =
#
# to control sender and recipient addresses, respectively. When this is done,
# unqualified addresses are qualified using the settings of qualify_domain
# and/or qualify_recipient (see above).
# If you want Exim to support the "percent hack" for certain domains,
# uncomment the following line and provide a list of domains. The "percent
# hack" is the feature by which mail addressed to x%y@z (where z is one of
# the domains listed) is locally rerouted to x@y and sent on. If z is not one
# of the "percent hack" domains, x%y is treated as an ordinary local part. This
# hack is rarely needed nowadays; you should not enable it unless you are sure
# that you really need it.
#
# percent_hack_domains =
#
# As well as setting this option you will also need to remove the test
# for local parts containing % in the ACL definition below.
# When Exim can neither deliver a message nor return it to sender, it "freezes"
# the delivery error message (aka "bounce message"). There are also other
# circumstances in which messages get frozen. They will stay on the queue for
# ever unless one of the following options is set.
# This option unfreezes frozen bounce messages after two days, tries
# once more to deliver them, and ignores any delivery failures.
ignore_bounce_errors_after = 2d
# This option cancels (removes) frozen messages that are older than a week.
timeout_frozen_after = 7d
# By default, messages that are waiting on Exim's queue are all held in a
# single directory called "input" which it itself within Exim's spool
# directory. (The default spool directory is specified when Exim is built, and
# is often /var/spool/exim/.) Exim works best when its queue is kept short, but
# there are circumstances where this is not always possible. If you uncomment
# the setting below, messages on the queue are held in 62 subdirectories of
# "input" instead of all in the same directory. The subdirectories are called
# 0, 1, ... A, B, ... a, b, ... z. This has two benefits: (1) If your file
# system degrades with many files in one directory, this is less likely to
# happen; (2) Exim can process the queue one subdirectory at a time instead of
# all at once, which can give better performance with large queues.
# split_spool_directory = true
<% if mailman_domains.length > 0 -%>
# Home dir for your Mailman installation -- aka Mailman's prefix
# directory.
MM_HOME=/var/lib/mailman
#
# User and group for Mailman, should match your --with-mail-gid
# switch to Mailman's configure script.
# Value is normally "mailman"
MM_UID=list
MM_GID=list
#
# Domains that your lists are in - colon separated list
# you may wish to add these into local_domains as well
domainlist mm_domains=<%= mailman_domains.join(":") %>
#
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#
# These values are derived from the ones above and should not need
# editing unless you have munged your mailman installation
#
# The path of the Mailman mail wrapper script
MM_WRAP=MM_HOME/mail/mailman
#
# The path of the list config file (used as a required file when
# verifying list addresses)
MM_LISTCHK=MM_HOME/lists/${lc::$local_part}/config.pck
<% end -%>
######################################################################
# ACL CONFIGURATION #
# Specifies access control lists for incoming SMTP mail #
######################################################################
begin acl
# This access control list is used for every RCPT command in an incoming
# SMTP message. The tests are run in order until the address is either
# accepted or denied.
acl_check_rcpt:
# Accept if the source is local SMTP (i.e. not over TCP/IP). We do this by
# testing for an empty sending host field.
accept hosts = :
control = dkim_disable_verify
#############################################################################
# The following section of the ACL is concerned with local parts that contain
# @ or % or ! or / or | or dots in unusual places.
#
# The characters other than dots are rarely found in genuine local parts, but
# are often tried by people looking to circumvent relaying restrictions.
# Therefore, although they are valid in local parts, these rules lock them
# out, as a precaution.
#
# Empty components (two dots in a row) are not valid in RFC 2822, but Exim
# allows them because they have been encountered. (Consider local parts
# constructed as "firstinitial.secondinitial.familyname" when applied to
# someone like me, who has no second initial.) However, a local part starting
# with a dot or containing /../ can cause trouble if it is used as part of a
# file name (e.g. for a mailing list). This is also true for local parts that
# contain slashes. A pipe symbol can also be troublesome if the local part is
# incorporated unthinkingly into a shell command line.
#
# Two different rules are used. The first one is stricter, and is applied to
# messages that are addressed to one of the local domains handled by this
# host. The line "domains = +local_domains" restricts it to domains that are
# defined by the "domainlist local_domains" setting above. The rule blocks
# local parts that begin with a dot or contain @ % ! / or |. If you have
# local accounts that include these characters, you will have to modify this
# rule.
deny message = Restricted characters in address
domains = +local_domains
local_parts = ^[.] : ^.*[@%!/|]
# The second rule applies to all other domains, and is less strict. The line
# "domains = !+local_domains" restricts it to domains that are NOT defined by
# the "domainlist local_domains" setting above. The exclamation mark is a
# negating operator. This rule allows your own users to send outgoing
# messages to sites that use slashes and vertical bars in their local parts.
# It blocks local parts that begin with a dot, slash, or vertical bar, but
# allows these characters within the local part. However, the sequence /../
# is barred. The use of @ % and ! is blocked, as before. The motivation here
# is to prevent your users (or your users' viruses) from mounting certain
# kinds of attack on remote sites.
deny message = Restricted characters in address
domains = !+local_domains
local_parts = ^[./|] : ^.*[@%!] : ^.*/\\.\\./
#############################################################################
# Accept mail to postmaster in any local domain, regardless of the source,
# and without verifying the sender.
accept local_parts = postmaster
domains = +local_domains
# Deny unless the sender address can be verified.
require verify = sender
# Accept if the message arrived over an authenticated connection, from
# any host. Again, these messages are usually from MUAs, so recipient
# verification is omitted, and submission mode is set. And again, we do this
# check before any black list tests.
accept authenticated = *
control = submission/domain=
control = dkim_disable_verify
# Accept if the message comes from one of the hosts for which we are an
# outgoing relay. It is assumed that such hosts are most likely to be MUAs,
# so we set control=submission to make Exim treat the message as a
# submission. It will fix up various errors in the message, for example, the
# lack of a Date: header line. If you are actually relaying out out from
# MTAs, you may want to disable this. If you are handling both relaying from
# MTAs and submissions from MUAs you should probably split them into two
# lists, and handle them differently.
# Recipient verification is omitted here, because in many cases the clients
# are dumb MUAs that don't cope well with SMTP error responses. If you are
# actually relaying out from MTAs, you should probably add recipient
# verification here.
# Note that, by putting this test before any DNS black list checks, you will
# always accept from these hosts, even if they end up on a black list. The
# assumption is that they are your friends, and if they get onto a black
# list, it is a mistake.
accept hosts = +relay_from_hosts
control = submission
control = dkim_disable_verify
# Insist that any other recipient address that we accept is either in one of
# our local domains, or is in a domain for which we explicitly allow
# relaying. Any other domain is rejected as being unacceptable for relaying.
require message = relay not permitted
domains = +local_domains : +relay_to_domains
# We also require all accepted addresses to be verifiable. This check will
# do local part verification for local domains, but only check the domain
# for remote domains. The only way to check local parts for the remote
# relay domains is to use a callout (add /callout), but please read the
# documentation about callouts before doing this.
require verify = recipient
#############################################################################
# There are no default checks on DNS black lists because the domains that
# contain these lists are changing all the time. However, here are two
# examples of how you can get Exim to perform a DNS black list lookup at this
# point. The first one denies, whereas the second just warns.
#
# deny message = rejected because $sender_host_address is in a black list at $dnslist_domain\n$dnslist_text
# dnslists = black.list.example
#
# warn dnslists = black.list.example
# add_header = X-Warning: $sender_host_address is in a black list at $dnslist_domain
# log_message = found in $dnslist_domain
#############################################################################
#############################################################################
# This check is commented out because it is recognized that not every
# sysadmin will want to do it. If you enable it, the check performs
# Client SMTP Authorization (csa) checks on the sending host. These checks
# do DNS lookups for SRV records. The CSA proposal is currently (May 2005)
# an Internet draft. You can, of course, add additional conditions to this
# ACL statement to restrict the CSA checks to certain hosts only.
#
# require verify = csa
#############################################################################
# At this point, the address has passed all the checks that have been
# configured, so we accept it unconditionally.
accept
# This ACL is used after the contents of a message have been received. This
# is the ACL in which you can test a message's headers or body, and in
# particular, this is where you can invoke external virus or spam scanners.
# Some suggested ways of configuring these tests are shown below, commented
# out. Without any tests, this ACL accepts all messages. If you want to use
# such tests, you must ensure that Exim is compiled with the content-scanning
# extension (WITH_CONTENT_SCAN=yes in Local/Makefile).
acl_check_data:
# Deny if the message contains a virus. Before enabling this check, you
# must install a virus scanner and set the av_scanner option above.
#
# deny malware = *
# message = This message contains a virus ($malware_name).
# Add headers to a message if it is judged to be spam. Before enabling this,
# you must install SpamAssassin. You may also need to set the spamd_address
# option above.
#
# warn spam = nobody
# add_header = X-Spam_score: $spam_score\n\
# X-Spam_score_int: $spam_score_int\n\
# X-Spam_bar: $spam_bar\n\
# X-Spam_report: $spam_report
# Accept the message.
accept
######################################################################
# ROUTERS CONFIGURATION #
# Specifies how addresses are handled #
######################################################################
# THE ORDER IN WHICH THE ROUTERS ARE DEFINED IS IMPORTANT! #
# An address is passed to each router in turn until it is accepted. #
######################################################################
begin routers
<% if mailman_domains.length > 0 -%>
# Pick up on messages from our local mailman and route them via our
# special VERP-enabled transport
#
mailman_verp_router:
driver = dnslookup
# we only consider messages sent in through loopback
condition = ${if or{{eq{$sender_host_address}{127.0.0.1}} \
{eq{$sender_host_address}{::1}}}{yes}{no}}
# we do not do this for traffic going to the local machine
domains = !+local_domains:!+mm_domains
ignore_target_hosts = <; 0.0.0.0; \
64.94.110.11; \
127.0.0.0/8; \
::1/128;fe80::/10;fe \
c0::/10;ff00::/8
# only the un-VERPed bounce addresses are handled
senders = "*-bounces@*"
transport = mailman_verp_smtp
mailman_router:
driver = accept
domains = +mm_domains
require_files = MM_LISTCHK
local_part_suffix_optional
local_part_suffix = -admin : \
-bounces : -bounces+* : \
-confirm : -confirm+* : \
-join : -leave : \
-owner : -request : \
-subscribe : -unsubscribe
transport = mailman_transport
<% end -%>
# This router routes to remote hosts over SMTP by explicit IP address,
# when an email address is given in "domain literal" form, for example,
# <user@[192.168.35.64]>. The RFCs require this facility. However, it is
# little-known these days, and has been exploited by evil people seeking
# to abuse SMTP relays. Consequently it is commented out in the default
# configuration. If you uncomment this router, you also need to uncomment
# allow_domain_literals above, so that Exim can recognize the syntax of
# domain literal addresses.
# domain_literal:
# driver = ipliteral
# domains = ! +local_domains
# transport = remote_smtp
# This router routes addresses that are not in local domains by doing a DNS
# lookup on the domain name. The exclamation mark that appears in "domains = !
# +local_domains" is a negating operator, that is, it can be read as "not". The
# recipient's domain must not be one of those defined by "domainlist
# local_domains" above for this router to be used.
#
# If the router is used, any domain that resolves to 0.0.0.0 or to a loopback
# interface address (127.0.0.0/8) is treated as if it had no DNS entry. Note
# that 0.0.0.0 is the same as 0.0.0.0/32, which is commonly treated as the
# local host inside the network stack. It is not 0.0.0.0/0, the default route.
# If the DNS lookup fails, no further routers are tried because of the no_more
# setting, and consequently the address is unrouteable.
dnslookup:
driver = dnslookup
domains = ! +local_domains
transport = remote_smtp
ignore_target_hosts = 0.0.0.0 : 127.0.0.0/8
no_more
# The remaining routers handle addresses in the local domain(s), that is those
# domains that are defined by "domainlist local_domains" above.
# This router handles aliasing using a linearly searched alias file with the
# name SYSTEM_ALIASES_FILE. When this configuration is installed automatically,
# the name gets inserted into this file from whatever is set in Exim's
# build-time configuration. The default path is the traditional /etc/aliases.
# If you install this configuration by hand, you need to specify the correct
# path in the "data" setting below.
#
##### NB You must ensure that the alias file exists. It used to be the case
##### NB that every Unix had that file, because it was the Sendmail default.
##### NB These days, there are systems that don't have it. Your aliases
##### NB file should at least contain an alias for "postmaster".
#
# If any of your aliases expand to pipes or files, you will need to set
# up a user and a group for these deliveries to run under. You can do
# this by uncommenting the "user" option below (changing the user name
# as appropriate) and adding a "group" option if necessary. Alternatively, you
# can specify "user" on the transports that are used. Note that the transports
# listed below are the same as are used for .forward files; you might want
# to set up different ones for pipe and file deliveries from aliases.
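#
# A minimal /etc/aliases satisfying the postmaster requirement above might
# contain, for example (addresses purely illustrative):
#
#   postmaster: root
#   root: sysadmin@example.com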
system_aliases:
driver = redirect
allow_fail
allow_defer
data = ${lookup{$local_part}lsearch{/etc/aliases}}
# user = exim
file_transport = address_file
pipe_transport = address_pipe
# .forward files are not supported --jeblair
# This router matches local user mailboxes. If the router fails, the error
# message is "Unknown user".
# If you want this router to treat local parts with suffixes introduced by "-"
# or "+" characters as if the suffixes did not exist, uncomment the two local_
# part_suffix options. Then, for example, xxxx-foo@your.domain will be treated
# in the same way as xxxx@your.domain by this router.
localuser:
driver = accept
check_local_user
# local_part_suffix = +* : -*
# local_part_suffix_optional
transport = local_delivery
cannot_route_message = Unknown user
######################################################################
# TRANSPORTS CONFIGURATION #
######################################################################
# ORDER DOES NOT MATTER #
# Only one appropriate transport is called for each delivery. #
######################################################################
# A transport is used only when referenced from a router that successfully
# handles an address.
begin transports
# This transport is used for delivering messages over SMTP connections.
remote_smtp:
driver = smtp
# This transport is used for local delivery to user mailboxes in traditional
# BSD mailbox format. By default it will be run under the uid and gid of the
# local user, and requires the sticky bit to be set on the /var/mail directory.
# Some systems use the alternative approach of running mail deliveries under a
# particular group instead of using the sticky bit. The group and mode options
# below show how this is done here.
local_delivery:
driver = appendfile
file = /var/mail/$local_part
delivery_date_add
envelope_to_add
return_path_add
group = mail
mode = 0660
# This transport is used for handling pipe deliveries generated by alias or
# .forward files. If the pipe generates any standard output, it is returned
# to the sender of the message as a delivery error. Set return_fail_output
# instead of return_output if you want this to happen only when the pipe fails
# to complete normally. You can set different transports for aliases and
# forwards if you want to - see the references to address_pipe in the routers
# section above.
address_pipe:
driver = pipe
return_output
# This transport is used for handling deliveries directly to files that are
# generated by aliasing or forwarding.
address_file:
driver = appendfile
delivery_date_add
envelope_to_add
return_path_add
# This transport is used for handling autoreplies generated by the filtering
# option of the userforward router.
address_reply:
driver = autoreply
<% if mailman_domains.length > 0 -%>
mailman_transport:
driver = pipe
command = MM_WRAP \
'${if def:local_part_suffix \
{${sg{$local_part_suffix}{-(\\w+)(\\+.*)?}{\$1}}} \
{post}}' \
$local_part
current_directory = MM_HOME
home_directory = MM_HOME
user = MM_UID
group = MM_GID
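# The sg expansion above reduces a suffix such as "-bounces+token" to the
# bare mailman action word ("bounces"); mail with no suffix falls through
# to the "post" action.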
# Mailman VERP envelope sender address formatting. This seems not to use
# quoted-printable encoding of the address, but instead just replaces the
# '@' in the recipient address with '='.
#
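# For example (addresses illustrative): a message from the list
# mylist@lists.example.org to the subscriber jdoe@example.com would leave
# with its envelope sender rewritten from
# mylist-bounces@lists.example.org to
# mylist-bounces+jdoe=example.com@lists.example.org, which identifies the
# bouncing recipient when the bounce comes back.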
mailman_verp_smtp:
driver = smtp
# put recipient address into return_path
return_path = \
${local_part:$return_path}+$local_part=$domain@${domain:$return_path}
# must restrict to one recipient at a time
max_rcpt = 1
# Errors-To: may carry old return_path
headers_remove = Errors-To
headers_add = Errors-To: ${return_path}
<% end -%>
######################################################################
# RETRY CONFIGURATION #
######################################################################
begin retry
# This single retry rule applies to all domains and all errors. It specifies
# retries every 15 minutes for 2 hours, then increasing retry intervals,
# starting at 1 hour and increasing each time by a factor of 1.5, up to 16
# hours, then retries every 6 hours until 4 days have passed since the first
# failed delivery.
# WARNING: If you do not have any retry rules at all (this section of the
# configuration is non-existent or empty), Exim will not do any retries of
# messages that fail to get delivered at the first attempt. The effect will
# be to treat temporary errors as permanent. Therefore, DO NOT remove this
# retry rule unless you really don't want any retries.
# Address or Domain     Error       Retries
# -----------------     -----       -------
*                       *           F,2h,15m; G,16h,1h,1.5; F,4d,6h
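# Read as: "F,2h,15m" retries at fixed 15-minute intervals until 2 hours
# after the first failure; "G,16h,1h,1.5" then retries at geometrically
# increasing intervals, starting at 1 hour and multiplying by 1.5, until
# 16 hours have passed; "F,4d,6h" then retries every 6 hours until 4 days
# have passed.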
######################################################################
# REWRITE CONFIGURATION #
######################################################################
# There are no rewriting specifications in this default configuration file.
begin rewrite
######################################################################
# AUTHENTICATION CONFIGURATION #
######################################################################
# The following authenticators support plaintext username/password
# authentication using the standard PLAIN mechanism and the traditional
# but non-standard LOGIN mechanism, with Exim acting as the server.
# PLAIN and LOGIN are enough to support most MUA software.
#
# These authenticators are not complete: you need to change the
# server_condition settings to specify how passwords are verified.
# They are set up to offer authentication to the client only if the
# connection is encrypted with TLS, so you also need to add support
# for TLS. See the global configuration options section at the start
# of this file for more about TLS.
#
# The default RCPT ACL checks for successful authentication, and will accept
# messages from authenticated users from anywhere on the Internet.
begin authenticators
# PLAIN authentication has no server prompts. The client sends its
# credentials in one lump, containing an authorization ID (which we do not
# use), an authentication ID, and a password. The latter two appear as
# $auth2 and $auth3 in the configuration and should be checked against a
# valid username and password. In a real configuration you would typically
# use $auth2 as a lookup key, and compare $auth3 against the result of the
# lookup, perhaps using the crypteq{}{} condition.
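# For example (file name and layout illustrative), with credentials kept
# in /etc/exim4/passwd as "username:{crypt}hash" lines, the check might
# become:
#
# server_condition = ${if crypteq{$auth3}\
#                    {${lookup{$auth2}lsearch{/etc/exim4/passwd}\
#                    {$value}fail}}}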
#PLAIN:
# driver = plaintext
# server_set_id = $auth2
# server_prompts = :
# server_condition = Authentication is not yet configured
# server_advertise_condition = ${if def:tls_cipher }
# LOGIN authentication has traditional prompts and responses. There is no
# authorization ID in this mechanism, so unlike PLAIN the username and
# password are $auth1 and $auth2. Apart from that you can use the same
# server_condition setting for both authenticators.
#LOGIN:
# driver = plaintext
# server_set_id = $auth1
# server_prompts = <| Username: | Password:
# server_condition = Authentication is not yet configured
# server_advertise_condition = ${if def:tls_cipher }
######################################################################
# CONFIGURATION FOR local_scan() #
######################################################################
# If you have built Exim to include a local_scan() function that contains
# tables for private options, you can define those options here. Remember to
# uncomment the "begin" line. It is commented by default because it provokes
# an error with Exim binaries that are not built with LOCAL_SCAN_HAS_OPTIONS
# set in the Local/Makefile.
# begin local_scan
# End of Exim configuration file


@ -1 +0,0 @@
GERRIT_SITE=/home/gerrit2/review_site


@ -1,78 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is designed to expire old code reviews that have not been
# touched, according to the following rules:
# 1. if open and no activity in 2 weeks, expire
# 2. if negative comment and no activity in 1 week, expire
import paramiko
import json
import logging
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('user', help='The gerrit admin user')
parser.add_argument('ssh_key', help='The gerrit admin SSH key file')
options = parser.parse_args()
GERRIT_USER = options.user
GERRIT_SSH_KEY = options.ssh_key
logging.basicConfig(format='%(asctime)-6s: %(name)s - %(levelname)s '
                           '- %(message)s',
                    filename='/var/log/gerrit/expire_reviews.log')
logger = logging.getLogger('expire_reviews')
logger.setLevel(logging.INFO)
logger.info('Starting expire reviews')
logger.info('Connecting to Gerrit')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('localhost', username=GERRIT_USER, key_filename=GERRIT_SSH_KEY, port=29418)
def expire_patch_set(patch_id, patch_subject, has_negative):
    if has_negative:
        message = ('code review expired after 1 week of no activity after a '
                   'negative review, it can be restored using the \`Restore '
                   'Change\` button under the Patch Set on the web interface')
    else:
        message = ('code review expired after 2 weeks of no activity, it can '
                   'be restored using the \`Restore Change\` button under the '
                   'Patch Set on the web interface')
    command = 'gerrit review --abandon --message="{0}" {1}'.format(message,
                                                                   patch_id)
    logger.info('Expiring: %s - %s: %s', patch_id, patch_subject, message)
    stdin, stdout, stderr = ssh.exec_command(command)
    if stdout.channel.recv_exit_status() != 0:
        logger.error(stderr.read())
# Query all open with no activity for 2 weeks
logger.info('Searching no activity for 2 weeks')
stdin, stdout, stderr = ssh.exec_command('gerrit query --current-patch-set --format JSON status:open age:2w')
for line in stdout:
    row = json.loads(line)
    if 'rowCount' not in row:
        expire_patch_set(row['currentPatchSet']['revision'], row['subject'],
                         False)
# Query all reviewed with no activity for 1 week
logger.info('Searching no activity on negative review for 1 week')
stdin, stdout, stderr = ssh.exec_command('gerrit query --current-patch-set --all-approvals --format JSON status:reviewed age:1w')
for line in stdout:
    row = json.loads(line)
    if 'rowCount' not in row:
        # Search for negative approvals
        for approval in row['currentPatchSet']['approvals']:
            if approval['value'] == '-1':
                expire_patch_set(row['currentPatchSet']['revision'],
                                 row['subject'], True)
                break
logger.info('End expire review')


@ -1,70 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Fetch remotes reads a project config file called projects.yaml.
# It should look like:
# - project: PROJECT_NAME
#   remote: https://gerrit.googlesource.com/gerrit
import logging
import os
import subprocess
import shlex
import yaml
def run_command(cmd, status=False, env={}):
cmd_list = shlex.split(str(cmd))
newenv = os.environ
newenv.update(env)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=newenv)
(out, nothing) = p.communicate()
if status:
return (p.returncode, out.strip())
return out.strip()
def run_command_status(cmd, env={}):
return run_command(cmd, True, env)
logging.basicConfig(level=logging.ERROR)
REPO_ROOT = os.environ.get('REPO_ROOT',
'/home/gerrit2/review_site/git')
PROJECTS_YAML = os.environ.get('PROJECTS_YAML',
'/home/gerrit2/projects.yaml')
config = yaml.load(open(PROJECTS_YAML))
for section in config:
project = section['project']
if 'remote' not in section:
continue
project_git = "%s.git" % project
os.chdir(os.path.join(REPO_ROOT, project_git))
# Make sure that the specified remote exists
remote_url = section['remote']
# We could check if it exists first, but we're ignoring output anyway
# So just try to make it, and it'll either make a new one or do nothing
run_command("git remote add -f upstream %s" % remote_url)
# Fetch new revs from it
run_command("git remote update upstream")


@ -1,29 +0,0 @@
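#!/usr/bin/env python
# Print the UUID of a named Gerrit group by querying the account_groups
# table over Gerrit's gsql interface. Run it as a user with gsql access,
# passing the group name as the only argument (invocation illustrative):
#   python get_group_uuid.py 'Project Bootstrappers'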
import argparse
import paramiko
import json
parser = argparse.ArgumentParser()
parser.add_argument("--host", dest="host", default="review.openstack.org",
help="gerrit host to connect to")
parser.add_argument("--port", dest="port", action='store', type=int,
default=29418, help="gerrit port to connect to")
parser.add_argument("groups", nargs=1)
options = parser.parse_args()
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(options.host, port=options.port)
group = options.groups[0]
query = "select group_uuid from account_groups where name = '%s'" % group
command = 'gerrit gsql --format JSON -c "%s"' % query
stdin, stdout, stderr = client.exec_command(command)
for line in stdout:
row = json.loads(line)
if row['type'] == 'row':
print row['columns']['group_uuid']
ret = stdout.channel.recv_exit_status()


@ -1,67 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Make local repos reads a project config file called projects.yaml.
# It should look like:
# - project: PROJECT_NAME
# options:
# - close-pull
# remote: https://gerrit.googlesource.com/gerrit
# TODO: add support for
# ssh -p 29418 localhost gerrit -name create-project PROJECT
import logging
import os
import subprocess
import sys
import shlex
import yaml
def run_command(cmd, status=False, env={}):
cmd_list = shlex.split(str(cmd))
newenv = os.environ
newenv.update(env)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=newenv)
(out, nothing) = p.communicate()
if status:
return (p.returncode, out.strip())
return out.strip()
def run_command_status(cmd, env={}):
return run_command(cmd, True, env)
logging.basicConfig(level=logging.ERROR)
REPO_ROOT = sys.argv[1]
PROJECTS_YAML = os.environ.get('PROJECTS_YAML',
'/home/gerrit2/projects.yaml')
config = yaml.load(open(PROJECTS_YAML))
for section in config:
project = section['project']
project_git = "%s.git" % project
project_dir = os.path.join(REPO_ROOT, project_git)
if os.path.exists(project_dir):
continue
run_command("git --bare init --shared=group %s" % project_dir)


@ -1,89 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is designed to be called by a gerrit hook. It searches commit
# messages for the string "DocImpact" and emails the documentation team
# so they can review the change for documentation impact.
import argparse
import re
import subprocess
import smtplib
from email.mime.text import MIMEText
BASE_DIR = '/home/gerrit2/review_site'
EMAIL_TEMPLATE = """
Hi, I'd like you to take a look at this patch for potential
documentation impact.
%s
Log:
%s
"""
DEST_ADDRESS = 'openstack-docs@lists.openstack.org'
def process_impact(git_log, args):
"""Notify doc team of doc impact"""
email_content = EMAIL_TEMPLATE % (args.change_url, git_log)
msg = MIMEText(email_content)
msg['Subject'] = '[%s] DocImpact review request' % args.project
msg['From'] = 'gerrit2@review.openstack.org'
msg['To'] = DEST_ADDRESS
s = smtplib.SMTP('localhost')
s.sendmail('gerrit2@review.openstack.org', DEST_ADDRESS, msg.as_string())
s.quit()
def docs_impacted(git_log):
"""Determine if a changes log indicates there is a doc impact"""
impact_regexp = r'DocImpact'
return re.search(impact_regexp, git_log, re.IGNORECASE)
def extract_git_log(args):
"""Extract git log of all merged commits"""
cmd = ['git',
'--git-dir=' + BASE_DIR + '/git/' + args.project + '.git',
'log', '--no-merges', args.commit + '^1..' + args.commit]
return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('hook')
#common
parser.add_argument('--change', default=None)
parser.add_argument('--change-url', default=None)
parser.add_argument('--project', default=None)
parser.add_argument('--branch', default=None)
parser.add_argument('--commit', default=None)
#change-merged
parser.add_argument('--submitter', default=None)
#patchset-created
parser.add_argument('--uploader', default=None)
parser.add_argument('--patchset', default=None)
args = parser.parse_args()
# Get git log
git_log = extract_git_log(args)
# Process doc_impacts found in git log
if docs_impacted(git_log):
process_impact(git_log, args)
if __name__ == '__main__':
main()


@ -1,139 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is designed to be called by a gerrit hook. It searches new
# patchsets for strings like "blueprint FOO" or "bp FOO" and updates
# the corresponding Launchpad blueprints with links back to the change.
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
import os
import argparse
import re
import subprocess
import StringIO
import ConfigParser
import MySQLdb
BASE_DIR = '/home/gerrit2/review_site'
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
GERRIT_CONFIG = os.environ.get('GERRIT_CONFIG',
'/home/gerrit2/review_site/etc/gerrit.config')
GERRIT_SECURE_CONFIG = os.environ.get('GERRIT_SECURE_CONFIG',
'/home/gerrit2/review_site/etc/secure.config')
SPEC_RE = re.compile(r'(blueprint|bp)\s*[#:]?\s*(\S+)', re.I)
BODY_RE = re.compile(r'^\s+.*$')
def get_broken_config(filename):
""" gerrit config ini files are broken and have leading tabs """
text = ""
with open(filename,"r") as conf:
for line in conf.readlines():
text = "%s%s" % (text, line.lstrip())
fp = StringIO.StringIO(text)
    c = ConfigParser.ConfigParser()
c.readfp(fp)
return c
GERRIT_CONFIG = get_broken_config(GERRIT_CONFIG)
SECURE_CONFIG = get_broken_config(GERRIT_SECURE_CONFIG)
DB_USER = GERRIT_CONFIG.get("database", "username")
DB_PASS = SECURE_CONFIG.get("database","password")
DB_DB = GERRIT_CONFIG.get("database","database")
def update_spec(launchpad, project, name, subject, link, topic=None):
# For testing, if a project doesn't match openstack/foo, use
# the openstack-ci project instead.
group, project = project.split('/')
if group != 'openstack':
project = 'openstack-ci'
spec = launchpad.projects[project].getSpecification(name=name)
if not spec: return
if spec.whiteboard:
wb = spec.whiteboard.strip()
else:
wb = ''
changed = False
if topic:
topiclink = '%s/#q,topic:%s,n,z' % (link[:link.find('/',8)],
topic)
if topiclink not in wb:
wb += "\n\n\nGerrit topic: %(link)s" % dict(link=topiclink)
changed = True
if link not in wb:
wb += "\n\n\nAddressed by: %(link)s\n %(subject)s\n" % dict(subject=subject,
link=link)
changed = True
if changed:
spec.whiteboard = wb
spec.lp_save()
def find_specs(launchpad, dbconn, args):
git_log = subprocess.Popen(['git',
'--git-dir=' + BASE_DIR + '/git/' + args.project + '.git',
'log', '--no-merges',
args.commit + '^1..' + args.commit],
stdout=subprocess.PIPE).communicate()[0]
cur = dbconn.cursor()
cur.execute("select subject, topic from changes where change_key=%s", args.change)
subject, topic = cur.fetchone()
specs = set([m.group(2) for m in SPEC_RE.finditer(git_log)])
if topic:
topicspec = topic.split('/')[-1]
specs |= set([topicspec])
for spec in specs:
update_spec(launchpad, args.project, spec, subject,
args.change_url, topic)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('hook')
#common
parser.add_argument('--change', default=None)
parser.add_argument('--change-url', default=None)
parser.add_argument('--project', default=None)
parser.add_argument('--branch', default=None)
parser.add_argument('--commit', default=None)
#change-merged
parser.add_argument('--submitter', default=None)
# patchset-created
parser.add_argument('--uploader', default=None)
parser.add_argument('--patchset', default=None)
args = parser.parse_args()
launchpad = Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
GERRIT_CACHE_DIR,
credentials_file = GERRIT_CREDENTIALS,
version='devel')
conn = MySQLdb.connect(user = DB_USER, passwd = DB_PASS, db = DB_DB)
find_specs(launchpad, conn, args)
if __name__ == '__main__':
main()


@ -1,238 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is designed to be called by a gerrit hook. It searches new
# patchsets for strings like "bug FOO" and updates the corresponding
# Launchpad bugs' status.
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
import os
import argparse
import re
import subprocess
BASE_DIR = '/home/gerrit2/review_site'
GERRIT_CACHE_DIR = os.path.expanduser(os.environ.get('GERRIT_CACHE_DIR',
'~/.launchpadlib/cache'))
GERRIT_CREDENTIALS = os.path.expanduser(os.environ.get('GERRIT_CREDENTIALS',
'~/.launchpadlib/creds'))
def add_change_proposed_message(bugtask, change_url, project, branch):
subject = 'Fix proposed to %s (%s)' % (short_project(project), branch)
body = 'Fix proposed to branch: %s\nReview: %s' % (branch, change_url)
bugtask.bug.newMessage(subject=subject, content=body)
def add_change_merged_message(bugtask, change_url, project, commit,
submitter, branch, git_log):
subject = 'Fix merged to %s (%s)' % (short_project(project), branch)
git_url = 'http://github.com/%s/commit/%s' % (project, commit)
body = '''Reviewed: %s
Committed: %s
Submitter: %s
Branch: %s\n''' % (change_url, git_url, submitter, branch)
body = body + '\n' + git_log
bugtask.bug.newMessage(subject=subject, content=body)
def set_in_progress(bugtask, launchpad, uploader, change_url):
"""Set bug In progress with assignee being the uploader"""
# Retrieve uploader from Launchpad. Use email as search key if
# provided, and only set if there is a clear match.
try:
searchkey = uploader[uploader.rindex("(") + 1:-1]
except ValueError:
searchkey = uploader
persons = launchpad.people.findPerson(text=searchkey)
if len(persons) == 1:
bugtask.assignee = persons[0]
bugtask.status = "In Progress"
bugtask.lp_save()
def set_fix_committed(bugtask):
"""Set bug fix committed"""
bugtask.status = "Fix Committed"
bugtask.lp_save()
def set_fix_released(bugtask):
"""Set bug fix released"""
bugtask.status = "Fix Released"
bugtask.lp_save()
def release_fixcommitted(bugtask):
"""Set bug FixReleased if it was FixCommitted"""
if bugtask.status == u'Fix Committed':
set_fix_released(bugtask)
def tag_in_branchname(bugtask, branch):
"""Tag bug with in-branch-name tag (if name is appropriate)"""
lp_bug = bugtask.bug
branch_name = branch.replace('/', '-')
    if branch_name.replace('-', '').isalnum():
        lp_bug.tags = lp_bug.tags + ["in-%s" % branch_name]
        lp_bug.lp_save()
def short_project(full_project_name):
"""Return the project part of the git repository name"""
return full_project_name.split('/')[-1]
def git2lp(full_project_name):
"""Convert Git repo name to Launchpad project"""
project_map = {
'openstack/openstack-ci-puppet': 'openstack-ci',
'openstack-ci/devstack-gate': 'openstack-ci',
'openstack-ci/gerrit': 'openstack-ci',
'openstack-ci/lodgeit': 'openstack-ci',
'openstack-ci/meetbot': 'openstack-ci',
}
return project_map.get(full_project_name, short_project(full_project_name))
def is_direct_release(full_project_name):
"""Test against a list of projects who directly release changes."""
return full_project_name in [
'openstack-ci/devstack-gate',
'openstack-ci/lodgeit',
'openstack-ci/meetbot',
'openstack-dev/devstack',
'openstack/openstack-ci',
'openstack/openstack-ci-puppet',
'openstack/openstack-manuals',
]
def process_bugtask(launchpad, bugtask, git_log, args):
"""Apply changes to bugtask, based on hook / branch..."""
if args.hook == "change-merged":
if args.branch == 'master':
if is_direct_release(args.project):
set_fix_released(bugtask)
else:
set_fix_committed(bugtask)
elif args.branch == 'milestone-proposed':
release_fixcommitted(bugtask)
elif args.branch.startswith('stable/'):
series = args.branch[7:]
# Look for a related task matching the series
for reltask in bugtask.related_tasks:
if reltask.bug_target_name.endswith("/" + series):
# Use fixcommitted if there is any
set_fix_committed(reltask)
break
else:
# Use tagging if there isn't any
tag_in_branchname(bugtask, args.branch)
add_change_merged_message(bugtask, args.change_url, args.project,
args.commit, args.submitter, args.branch,
git_log)
if args.hook == "patchset-created":
if args.branch == 'master':
set_in_progress(bugtask, launchpad, args.uploader, args.change_url)
elif args.branch.startswith('stable/'):
series = args.branch[7:]
for reltask in bugtask.related_tasks:
if reltask.bug_target_name.endswith("/" + series):
set_in_progress(reltask, launchpad,
args.uploader, args.change_url)
break
if args.patchset == '1':
add_change_proposed_message(bugtask, args.change_url,
args.project, args.branch)
def find_bugs(launchpad, git_log, args):
"""Find bugs referenced in the git log and return related bugtasks"""
bug_regexp = r'([Bb]ug|[Ll][Pp])[\s#:]*(\d+)'
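    # Matches, for example, "bug 12345", "Bug #12345" and "lp: 12345".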
tokens = re.split(bug_regexp, git_log)
# Extract unique bug tasks
bugtasks = {}
for token in tokens:
if re.match('^\d+$', token) and (token not in bugtasks):
try:
lp_bug = launchpad.bugs[token]
for lp_task in lp_bug.bug_tasks:
if lp_task.bug_target_name == git2lp(args.project):
bugtasks[token] = lp_task
break
except KeyError:
# Unknown bug
pass
return bugtasks.values()
def extract_git_log(args):
"""Extract git log of all merged commits"""
cmd = ['git',
'--git-dir=' + BASE_DIR + '/git/' + args.project + '.git',
'log', '--no-merges', args.commit + '^1..' + args.commit]
return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('hook')
#common
parser.add_argument('--change', default=None)
parser.add_argument('--change-url', default=None)
parser.add_argument('--project', default=None)
parser.add_argument('--branch', default=None)
parser.add_argument('--commit', default=None)
#change-merged
parser.add_argument('--submitter', default=None)
#patchset-created
parser.add_argument('--uploader', default=None)
parser.add_argument('--patchset', default=None)
args = parser.parse_args()
# Connect to Launchpad
launchpad = Launchpad.login_with('Gerrit User Sync', LPNET_SERVICE_ROOT,
GERRIT_CACHE_DIR,
credentials_file=GERRIT_CREDENTIALS,
version='devel')
# Get git log
git_log = extract_git_log(args)
# Process bugtasks found in git log
for bugtask in find_bugs(launchpad, git_log, args):
process_bugtask(launchpad, bugtask, git_log, args)
if __name__ == '__main__':
main()


@ -1,71 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Add launchpad ids listed in the wiki CLA page to the CLA group in LP.
import os
import urllib
import re
from launchpadlib.launchpad import Launchpad
from launchpadlib.uris import LPNET_SERVICE_ROOT
DEBUG = False
LP_CACHE_DIR = os.path.expanduser('~/.launchpadlib/cache')
LP_CREDENTIALS = os.path.expanduser('~/.launchpadlib/creds')
CONTRIBUTOR_RE = re.compile(r'.*?\|\|\s*(?P<name>.*?)\s*\|\|\s*(?P<login>.*?)\s*\|\|\s*(?P<trans>.*?)\s*\|\|.*?')
LINK_RE = re.compile(r'\[\[.*\|\s*(?P<name>.*)\s*\]\]')
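# CONTRIBUTOR_RE matches MoinMoin table rows on the wiki page, e.g. a row
# like (contents illustrative):
#   || Jane Doe || [[http://launchpad.net/~jdoe|jdoe]] || ICLA ||
# LINK_RE then reduces a wiki-link login cell such as the one above to the
# bare id "jdoe".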
for check_path in (os.path.dirname(LP_CACHE_DIR),
os.path.dirname(LP_CREDENTIALS)):
if not os.path.exists(check_path):
os.makedirs(check_path)
wiki_members = []
for line in urllib.urlopen('http://wiki.openstack.org/Contributors?action=raw'):
m = CONTRIBUTOR_RE.match(line)
if m and m.group('login') and m.group('trans'):
login = m.group('login')
if login=="<#c0c0c0>'''Launchpad ID'''": continue
l = LINK_RE.match(login)
if l:
login = l.group('name')
wiki_members.append(login)
launchpad = Launchpad.login_with('CLA Team Sync', LPNET_SERVICE_ROOT,
LP_CACHE_DIR,
credentials_file = LP_CREDENTIALS)
lp_members = []
team = launchpad.people['openstack-cla']
for detail in team.members_details:
user = None
# detail.self_link ==
# 'https://api.launchpad.net/1.0/~team/+member/${username}'
login = detail.self_link.split('/')[-1]
status = detail.status
lp_members.append(login)
for wm in wiki_members:
    if wm not in lp_members:
        print "Need to add %s to LP" % wm
        try:
            person = launchpad.people[wm]
        except KeyError:
            print "Unable to find %s on LP" % wm
            continue
        status = team.addMember(person=person, status="Approved")


@ -1,31 +0,0 @@
class gerrit::cron(
$script_user='update',
$script_key_file='/home/gerrit2/.ssh/id_rsa'
) {
cron { "expireoldreviews":
user => gerrit2,
hour => 6,
minute => 3,
command => "python /usr/local/gerrit/scripts/expire_old_reviews.py ${script_user} ${script_key_file}",
require => File['/usr/local/gerrit/scripts'],
}
cron { "gerrit_repack":
user => gerrit2,
weekday => 0,
hour => 4,
minute => 7,
command => 'find /home/gerrit2/review_site/git/ -type d -name "*.git" -print -exec git --git-dir="{}" repack -afd \;',
environment => "PATH=/usr/bin:/bin:/usr/sbin:/sbin",
}
cron { "removedbdumps":
user => gerrit2,
hour => 5,
minute => 1,
command => 'find /home/gerrit2/dbupdates/ -name "*.sql.gz" -mtime +30 -exec rm -f {} \;',
environment => "PATH=/usr/bin:/bin:/usr/sbin:/sbin",
}
}


@ -1,393 +0,0 @@
# Install and maintain Gerrit Code Review.
# params:
# vhost_name:
#   used in the Apache virtual host, e.g., review.example.com
# canonicalweburl:
#   used in the Gerrit config to generate links, e.g., https://review.example.com/
# ssl_cert_file:
# ssl_key_file:
# Used in the Apache virtual host to specify the SSL cert and key files.
# ssl_chain_file:
# Optional, if you have an intermediate cert Apache should serve.
# openidssourl:
# The URL to use for OpenID in SSO mode.
# email:
# The email address Gerrit should use when sending mail.
# database_poollimit:
# container_heaplimit:
# core_packedgitopenfiles:
# core_packedgitlimit:
# core_packedgitwindowsize:
# sshd_threads:
# httpd_acceptorthreads:
# httpd_minthreads:
# httpd_maxthreads:
# httpd_maxwait:
# Gerrit configuration options; see Gerrit docs.
# commentlinks:
# A list of regexes Gerrit should hyperlink.
# war:
# The URL of the Gerrit WAR that should be downloaded and installed.
# Note that only the final component is used for comparing to the most
# recently installed WAR. In other words, if you update the war from:
#
# http://tarballs.openstack.org/ci/gerrit.war
# to:
# http://somewhereelse.example.com/gerrit.war
#
# Gerrit won't be updated unless you delete gerrit.war from
# ~gerrit2/gerrit-wars. But if you change the URL from:
#
# http://tarballs.openstack.org/ci/gerrit-2.2.2.war
# to:
# http://tarballs.openstack.org/ci/gerrit-2.3.0.war
# Gerrit will be upgraded on the next puppet run.
# replicate_github:
# A boolean enabling replication to github
# replicate_local:
# A boolean enabling local replication for apache acceleration
# gitweb:
# A boolean enabling gitweb
# testmode:
# Set this to true to disable cron jobs and replication,
# which can interfere with testing.
# TODO: make more gerrit options configurable here
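# A minimal usage sketch (all values illustrative; secrets would normally
# come from hiera):
#
#   class { 'gerrit':
#     vhost_name          => 'review.example.com',
#     canonicalweburl     => 'https://review.example.com/',
#     war                 => 'http://tarballs.openstack.org/ci/gerrit.war',
#     mysql_password      => hiera('gerrit_mysql_password'),
#     mysql_root_password => hiera('gerrit_mysql_root_password'),
#     email_private_key   => hiera('gerrit_email_private_key'),
#   }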
class gerrit($vhost_name=$fqdn,
$canonicalweburl="https://$fqdn/",
$serveradmin="webmaster@$fqdn",
$ssl_cert_file='/etc/ssl/certs/ssl-cert-snakeoil.pem',
$ssl_key_file='/etc/ssl/private/ssl-cert-snakeoil.key',
$ssl_chain_file='',
$openidssourl="https://login.launchpad.net/+openid",
$email='',
$database_poollimit='',
$container_heaplimit='',
$core_packedgitopenfiles='',
$core_packedgitlimit='',
$core_packedgitwindowsize='',
$sshd_threads='',
$httpd_acceptorthreads='',
$httpd_minthreads='',
$httpd_maxthreads='',
$httpd_maxwait='',
$commentlinks = [],
$war,
$projects_file = 'UNDEF',
$enable_melody = 'false',
$melody_session = 'false',
$mysql_password,
$mysql_root_password,
$email_private_key,
$replicate_github=false,
$replicate_local=true,
$local_git_dir='/var/lib/git',
$replication_targets=[],
$gitweb=true,
$testmode=false
) {
include apache
$java_home = $lsbdistcodename ? {
"precise" => "/usr/lib/jvm/java-6-openjdk-amd64/jre",
"oneiric" => "/usr/lib/jvm/java-6-openjdk/jre",
"natty" => "/usr/lib/jvm/java-6-openjdk/jre",
}
user { "gerrit2":
ensure => present,
comment => "Gerrit",
home => "/home/gerrit2",
shell => "/bin/bash",
gid => "gerrit2",
managehome => true,
require => Group["gerrit2"]
}
group { "gerrit2":
ensure => present
}
if ($gitweb) {
package { "gitweb":
ensure => present
}
}
$packages = ["openjdk-6-jre-headless"]
package { $packages:
ensure => present,
}
file { "/var/log/gerrit":
ensure => "directory",
owner => 'gerrit2'
}
# Prepare gerrit directories. Even though some of these would be created
# by the init command, we can go ahead and create them now and populate them.
# That way the config files are already in place before init runs.
file { "/home/gerrit2/review_site":
ensure => "directory",
owner => "gerrit2",
require => User["gerrit2"]
}
file { "/home/gerrit2/review_site/etc":
ensure => "directory",
owner => "gerrit2",
require => File["/home/gerrit2/review_site"]
}
file { "/home/gerrit2/review_site/bin":
ensure => "directory",
owner => "gerrit2",
require => File["/home/gerrit2/review_site"]
}
file { "/home/gerrit2/review_site/static":
ensure => "directory",
owner => "gerrit2",
require => File["/home/gerrit2/review_site"]
}
file { "/home/gerrit2/review_site/hooks":
ensure => "directory",
owner => "gerrit2",
require => File["/home/gerrit2/review_site"]
}
# Skip replication if we're in test mode
if ($testmode == false) {
file { '/home/gerrit2/review_site/etc/replication.config':
owner => 'root',
group => 'root',
mode => 444,
ensure => 'present',
content => template('gerrit/replication.config.erb'),
replace => 'true',
require => File["/home/gerrit2/review_site/etc"]
}
}
if ($projects_file != 'UNDEF') {
if ($replicate_local) {
file { $local_git_dir:
ensure => "directory",
owner => "gerrit2",
}
}
file { '/home/gerrit2/projects.yaml':
owner => 'gerrit2',
group => 'gerrit2',
mode => 444,
ensure => 'present',
source => $projects_file,
replace => true,
}
exec { "make_local_repos":
user => 'gerrit2',
command => "/usr/local/gerrit/scripts/make_local_repos.py $local_git_dir",
subscribe => File["/home/gerrit2/projects.yaml"],
refreshonly => true,
require => File["/home/gerrit2/projects.yaml"]
}
}
# Gerrit sets these permissions in 'init'; don't fight them.
file { '/home/gerrit2/review_site/etc/gerrit.config':
owner => 'gerrit2',
group => 'gerrit2',
mode => 644,
ensure => 'present',
content => template('gerrit/gerrit.config.erb'),
replace => 'true',
require => File["/home/gerrit2/review_site/etc"]
}
# Secret files.
# Gerrit sets these permissions in 'init'; don't fight them. If
# these permissions aren't set correctly, gerrit init will write a
# new secure.config file and lose the mysql password.
file { '/home/gerrit2/review_site/etc/secure.config':
owner => 'gerrit2',
group => 'gerrit2',
mode => 600,
ensure => 'present',
content => template('gerrit/secure.config.erb'),
replace => 'true',
require => File["/home/gerrit2/review_site/etc"]
}
# Set up MySQL.
class {"mysql::server":
config_hash => {
'root_password' => "${mysql_root_password}",
'default_engine' => 'InnoDB',
'bind_address' => '127.0.0.1',
}
}
include mysql::server::account_security
mysql::db { "reviewdb":
user => "gerrit2",
password => "${mysql_password}",
host => "localhost",
grant => "all",
charset => "latin1",
}
# Set up apache.
apache::vhost { $vhost_name:
port => 443,
docroot => 'MEANINGLESS ARGUMENT',
priority => '50',
template => 'gerrit/gerrit.vhost.erb',
ssl => true,
}
a2mod { 'rewrite':
ensure => present
}
a2mod { 'proxy':
ensure => present
}
a2mod { 'proxy_http':
ensure => present
}
# Install Gerrit itself.
# The Gerrit WAR is specified as a url like
# 'http://tarballs.openstack.org/ci/gerrit-2.2.2-363-gd0a67ce.war'.
# Set $basewar so that we can work with filenames like
# 'gerrit-2.2.2-363-gd0a67ce.war'.
if $war =~ /.*\/(.*)/ {
$basewar = $1
} else {
$basewar = $war
}
# This directory is used to download and cache gerrit war files.
# That way the download and install steps are kept separate.
file { "/home/gerrit2/gerrit-wars":
ensure => "directory",
require => User["gerrit2"]
}
# If we don't already have the specified WAR, download it.
exec { "download:$war":
command => "/usr/bin/wget $war -O /home/gerrit2/gerrit-wars/$basewar",
creates => "/home/gerrit2/gerrit-wars/$basewar",
require => File["/home/gerrit2/gerrit-wars"],
}
# If gerrit.war isn't the same as $basewar, install it.
file { "/home/gerrit2/review_site/bin/gerrit.war":
source => "file:///home/gerrit2/gerrit-wars/$basewar",
require => Exec["download:$war"],
ensure => present,
replace => 'true',
# user, group, and mode have to be set this way to avoid retriggering gerrit-init on every run
# because gerrit init sets them this way
owner => 'gerrit2',
group => 'gerrit2',
mode => 644,
}
# If gerrit.war was just installed, run the Gerrit "init" command.
exec { "gerrit-initial-init":
user => 'gerrit2',
command => "/usr/bin/java -jar /home/gerrit2/review_site/bin/gerrit.war init -d /home/gerrit2/review_site --batch --no-auto-start",
subscribe => File["/home/gerrit2/review_site/bin/gerrit.war"],
require => [Package["openjdk-6-jre-headless"],
User["gerrit2"],
Mysql::Db["reviewdb"],
File["/home/gerrit2/review_site/etc/gerrit.config"],
File["/home/gerrit2/review_site/etc/secure.config"]],
notify => Exec["gerrit-start"],
unless => "/usr/bin/test -f /etc/init.d/gerrit",
}
# If a new gerrit.war was just installed, run the Gerrit "init" command.
# Stop is included here because it may not be running or the init
# script may not exist, and in those cases, we don't care if it fails.
# Running the init script as the gerrit2 user _does_ work.
exec { "gerrit-init":
user => 'gerrit2',
command => "/etc/init.d/gerrit stop; /usr/bin/java -jar /home/gerrit2/review_site/bin/gerrit.war init -d /home/gerrit2/review_site --batch --no-auto-start",
subscribe => File["/home/gerrit2/review_site/bin/gerrit.war"],
refreshonly => true,
require => [Package["openjdk-6-jre-headless"],
User["gerrit2"],
Mysql::Db["reviewdb"],
File["/home/gerrit2/review_site/etc/gerrit.config"],
File["/home/gerrit2/review_site/etc/secure.config"]],
notify => Exec["gerrit-start"],
onlyif => "/usr/bin/test -f /etc/init.d/gerrit",
}
# Symlink the init script.
file { "/etc/init.d/gerrit":
ensure => link,
target => '/home/gerrit2/review_site/bin/gerrit.sh',
require => Exec['gerrit-initial-init'],
}
# The init script requires the path to gerrit to be set.
file { "/etc/default/gerritcodereview":
source => 'puppet:///modules/gerrit/gerritcodereview.default',
ensure => present,
replace => 'true',
owner => 'root',
group => 'root',
mode => 444,
}
# Make sure the init script starts on boot.
file { ['/etc/rc0.d/K10gerrit',
'/etc/rc1.d/K10gerrit',
'/etc/rc2.d/S90gerrit',
'/etc/rc3.d/S90gerrit',
'/etc/rc4.d/S90gerrit',
'/etc/rc5.d/S90gerrit',
'/etc/rc6.d/K10gerrit']:
ensure => link,
target => '/etc/init.d/gerrit',
require => File['/etc/init.d/gerrit'],
}
exec { "gerrit-start":
command => '/etc/init.d/gerrit start',
require => File['/etc/init.d/gerrit'],
refreshonly => true,
}
file { '/usr/local/gerrit':
owner => 'root',
group => 'root',
mode => 755,
ensure => 'directory',
}
file { '/usr/local/gerrit/scripts':
owner => 'root',
group => 'root',
mode => 755,
ensure => 'directory',
recurse => true,
require => File['/usr/local/gerrit'],
source => [
"puppet:///modules/gerrit/scripts",
],
}
}


@ -1,13 +0,0 @@
class gerrit::remotes($ensure=present) {
cron { "gerritfetchremotes":
user => gerrit2,
ensure => $ensure,
minute => "*/30",
command => 'sleep $((RANDOM\%60+90)) && python /usr/local/gerrit/scripts/fetch_remotes.py',
require => File['/usr/local/gerrit/scripts'],
}
file { '/home/gerrit2/remotes.config':
ensure => absent
}
}


@ -1,84 +0,0 @@
# This file is managed by puppet.
# https://github.com/openstack/openstack-ci-puppet
[gerrit]
basePath = git
canonicalWebUrl = <%= canonicalweburl %>
[database]
type = MYSQL
hostname = localhost
database = reviewdb
username = gerrit2
<% if database_poollimit != "" -%>
poolLimit = <%= database_poollimit %>
<% end -%>
connectionpool = true
[auth]
type = OPENID_SSO
openIdSsoUrl = <%= openidssourl %>
cookieSecure = true
contributorAgreements = true
[sendemail]
smtpServer = localhost
[container]
user = gerrit2
javaHome = <%= java_home %>
<% if container_heaplimit != "" -%>
heapLimit = <%= container_heaplimit %>
<% end -%>
[core]
<% if core_packedgitopenfiles != "" -%>
packedGitOpenFiles = <%= core_packedgitopenfiles %>
<% end -%>
<% if core_packedgitlimit != "" -%>
packedGitLimit = <%= core_packedgitlimit %>
<% end -%>
<% if core_packedgitwindowsize != "" -%>
packedGitWindowSize = <%= core_packedgitwindowsize %>
<% end -%>
[sshd]
listenAddress = *:29418
<% if sshd_threads != "" -%>
threads = <%= sshd_threads %>
<% end -%>
[httpd]
listenUrl = proxy-https://*:8081/
<% if httpd_maxwait != "" -%>
maxWait = <%= httpd_maxwait %>
<% end -%>
<% if httpd_acceptorthreads != "" -%>
acceptorThreads = <%= httpd_acceptorthreads %>
<% end -%>
<% if httpd_minthreads != "" -%>
minThreads = <%= httpd_minthreads %>
<% end -%>
<% if httpd_maxthreads != "" -%>
maxThreads = <%= httpd_maxthreads %>
<% end -%>
[cache]
directory = cache
[cache "web_sessions"]
maxAge = 1d
[user]
email = <%= email %>
<% commentlinks.each do |commentlink| -%>
[commentlink "<%= commentlink['name'] %>"]
match = "<%= commentlink['match'] %>"
link = "<%= commentlink['link'] %>"
<% end -%>
[theme]
backgroundColor = ffffff
topMenuColor = ffffff
textColor = 264d69
trimColor = eef3f5
selectionColor = d1e6ea
changeTableOutdatedColor = f5cccc
tableOddRowColor = ffffff
tableEvenRowColor = f5f5ff
[melody]
monitoring = <%= enable_melody %>
session = <%= melody_session %>
<% if gitweb -%>
[gitweb]
revision = "?p=${project}.git;a=commitdiff;h=${commit}"
<% end -%>


@ -1,72 +0,0 @@
<VirtualHost <%= scope.lookupvar("gerrit::vhost_name") %>:80>
ServerAdmin <%= scope.lookupvar("gerrit::serveradmin") %>
ErrorLog ${APACHE_LOG_DIR}/gerrit-error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/gerrit-access.log combined
Redirect / https://<%= scope.lookupvar("gerrit::vhost_name") %>/
</VirtualHost>
<IfModule mod_ssl.c>
<VirtualHost <%= scope.lookupvar("gerrit::vhost_name") %>:443>
ServerName <%= scope.lookupvar("gerrit::vhost_name") %>
ServerAdmin <%= scope.lookupvar("gerrit::serveradmin") %>
ErrorLog ${APACHE_LOG_DIR}/gerrit-ssl-error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/gerrit-ssl-access.log combined
SSLEngine on
SSLCertificateFile <%= scope.lookupvar("gerrit::ssl_cert_file") %>
SSLCertificateKeyFile <%= scope.lookupvar("gerrit::ssl_key_file") %>
<% if scope.lookupvar("gerrit::ssl_chain_file") != "" %>
SSLCertificateChainFile <%= scope.lookupvar("gerrit::ssl_chain_file") %>
<% end %>
<FilesMatch "\.(cgi|shtml|phtml|php)$">
SSLOptions +StdEnvVars
</FilesMatch>
<Directory /usr/lib/cgi-bin>
SSLOptions +StdEnvVars
</Directory>
BrowserMatch "MSIE [2-6]" \
nokeepalive ssl-unclean-shutdown \
downgrade-1.0 force-response-1.0
# MSIE 7 and newer should be able to use keepalive
BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
RewriteEngine on
RewriteCond %{HTTP_HOST} !<%= scope.lookupvar("gerrit::vhost_name") %>
RewriteRule ^.*$ <%= scope.lookupvar("gerrit::canonicalweburl") %>
<% if scope.lookupvar("gerrit::replicate_local") -%>
RewriteCond %{REQUEST_URI} !^/p/
<% end -%>
RewriteRule ^/(.*)$ http://localhost:8081/$1 [P]
ProxyPassReverse / http://localhost:8081/
<% if scope.lookupvar("gerrit::replicate_local") -%>
SetEnv GIT_PROJECT_ROOT /var/lib/git/
SetEnv GIT_HTTP_EXPORT_ALL
AliasMatch ^/p/(.*/objects/[0-9a-f]{2}/[0-9a-f]{38})$ /var/lib/git/$1
AliasMatch ^/p/(.*/objects/pack/pack-[0-9a-f]{40}.(pack|idx))$ /var/lib/git/$1
ScriptAlias /p/ /usr/lib/git-core/git-http-backend/
<% end -%>
<Directory /home/gerrit2/review_site/git/>
Order allow,deny
Allow from all
</Directory>
</VirtualHost>
</IfModule>


@ -1,18 +0,0 @@
# This file is managed by puppet.
# https://github.com/openstack/openstack-ci-puppet
<% if replicate_github -%>
[remote "github"]
url = git@github.com:${name}.git
authGroup = Anonymous Users
replicatePermissions = false
mirror = true
<% end -%>
<% if replicate_local -%>
[remote "local"]
url = file:///var/lib/git/${name}.git
replicationDelay = 0
threads = 4
mirror = true
<% end -%>


@ -1,4 +0,0 @@
[database]
password = <%= mysql_password %>
[auth]
registerEmailPrivateKey = <%= email_private_key %>


@ -1,273 +0,0 @@
#! /usr/bin/env python
# The configuration file should look like:
"""
[ircbot]
nick=NICKNAME
pass=PASSWORD
server=irc.freenode.net
port=6667
channel_config=/path/to/yaml/config
[gerrit]
user=gerrit2
key=/path/to/id_rsa
host=review.example.com
port=29418
"""
# The yaml channel config should look like:
"""
openstack-dev:
events:
- patchset-created
- change-merged
projects:
- openstack/nova
- openstack/swift
branches:
- master
"""
import ircbot
import time
import subprocess
import threading
import select
import json
import sys
import os
import ConfigParser
import daemon, daemon.pidlockfile
import traceback
import yaml
class GerritBot(ircbot.SingleServerIRCBot):
def __init__(self, channels, nickname, password, server, port=6667):
ircbot.SingleServerIRCBot.__init__(self,
[(server, port)],
nickname, nickname)
self.channel_list = channels
self.nickname = nickname
self.password = password
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
c.privmsg("nickserv", "identify %s " % self.password)
c.privmsg("nickserv", "ghost %s %s" % (self.nickname, self.password))
c.privmsg("nickserv", "release %s %s" % (self.nickname, self.password))
time.sleep(1)
c.nick(self.nickname)
def on_welcome(self, c, e):
        c.privmsg("nickserv", "identify %s " % self.password)
for channel in self.channel_list:
c.join(channel)
def send(self, channel, msg):
self.connection.privmsg(channel, msg)
time.sleep(0.5)
class Gerrit(threading.Thread):
def __init__(self, ircbot, channel_config,
username, keyfile, server, port=29418):
threading.Thread.__init__(self)
self.ircbot = ircbot
self.channel_config = channel_config
self.username = username
self.keyfile = keyfile
self.server = server
self.port = port
self.proc = None
self.poll = select.poll()
def _open(self):
self.proc = subprocess.Popen(['/usr/bin/ssh', '-p', str(self.port),
'-i', self.keyfile,
'-l', self.username, self.server,
'gerrit', 'stream-events'],
bufsize=1,
stdin=None,
stdout=subprocess.PIPE,
stderr=None,
)
self.poll.register(self.proc.stdout)
def _close(self):
try:
self.poll.unregister(self.proc.stdout)
except:
pass
try:
self.proc.kill()
except:
pass
self.proc = None
def patchset_created(self, channel, data):
msg = '%s proposed a change to %s: %s %s' % (
data['patchSet']['uploader']['name'],
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
def comment_added(self, channel, data):
msg = 'A comment has been added to a proposed change to %s: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
for approval in data.get('approvals', []):
if (approval['type'] == 'VRIF' and approval['value'] == '-2' and
channel in self.channel_config.events.get(
'x-vrif-minus-2', set())):
msg = 'Verification of a change to %s failed: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
if (approval['type'] == 'VRIF' and approval['value'] == '2' and
channel in self.channel_config.events.get(
'x-vrif-plus-2', set())):
msg = 'Verification of a change to %s succeeded: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
if (approval['type'] == 'CRVW' and approval['value'] == '-2' and
channel in self.channel_config.events.get(
'x-crvw-minus-2', set())):
msg = 'A change to %s has been rejected: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
if (approval['type'] == 'CRVW' and approval['value'] == '2' and
channel in self.channel_config.events.get(
'x-crvw-plus-2', set())):
msg = 'A change to %s has been approved: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
def change_merged(self, channel, data):
msg = 'A change was merged to %s: %s %s' % (
data['change']['project'],
data['change']['subject'],
data['change']['url'])
self.ircbot.send(channel, msg)
def _read(self):
l = self.proc.stdout.readline()
data = json.loads(l)
channel_set = (self.channel_config.projects.get(
data['change']['project'], set()) &
self.channel_config.events.get(
data['type'], set()) &
self.channel_config.branches.get(
data['change']['branch'], set()))
for channel in channel_set:
if data['type'] == 'comment-added':
self.comment_added(channel, data)
elif data['type'] == 'patchset-created':
self.patchset_created(channel, data)
elif data['type'] == 'change-merged':
self.change_merged(channel, data)
def _listen(self):
while True:
ret = self.poll.poll()
for (fd, event) in ret:
if fd == self.proc.stdout.fileno():
if event == select.POLLIN:
self._read()
else:
raise Exception("event on ssh connection")
def _run(self):
try:
if not self.proc:
self._open()
self._listen()
except:
traceback.print_exc()
self._close()
time.sleep(5)
def run(self):
time.sleep(5)
while True:
self._run()
class ChannelConfig(object):
def __init__(self, data):
self.data = data
keys = data.keys()
for key in keys:
if key[0] != '#':
data['#'+key] = data.pop(key)
self.channels = data.keys()
self.projects = {}
self.events = {}
self.branches = {}
for channel, val in self.data.iteritems():
for event in val['events']:
event_set = self.events.get(event, set())
event_set.add(channel)
self.events[event] = event_set
for project in val['projects']:
project_set = self.projects.get(project, set())
project_set.add(channel)
self.projects[project] = project_set
for branch in val['branches']:
branch_set = self.branches.get(branch, set())
branch_set.add(channel)
self.branches[branch] = branch_set
def _main():
    config = ConfigParser.ConfigParser()
config.read(sys.argv[1])
fp = config.get('ircbot', 'channel_config')
if fp:
fp = os.path.expanduser(fp)
if not os.path.exists(fp):
raise Exception("Unable to read layout config file at %s" % fp)
else:
raise Exception("Channel Config must be specified in config file.")
channel_config = ChannelConfig(yaml.load(open(fp)))
bot = GerritBot(channel_config.channels,
config.get('ircbot', 'nick'),
config.get('ircbot', 'pass'),
config.get('ircbot', 'server'),
config.getint('ircbot', 'port'))
g = Gerrit(bot,
channel_config,
config.get('gerrit', 'user'),
config.get('gerrit', 'key'),
config.get('gerrit', 'host'),
config.getint('gerrit', 'port'))
g.start()
bot.start()
def main():
if len(sys.argv) != 2:
print "Usage: %s CONFIGFILE" % sys.argv[0]
sys.exit(1)
pid = daemon.pidlockfile.TimeoutPIDLockFile(
"/var/run/gerritbot/gerritbot.pid", 10)
with daemon.DaemonContext(pidfile=pid):
_main()
if __name__ == "__main__":
main()


@ -1,171 +0,0 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides:          gerritbot
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Gerrit IRC Bot
# Description:       Announces Gerrit events to IRC
### END INIT INFO

# Author: James Blair <james.blair@rackspace.com>

# Do NOT "set -e"

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="GerritBot"
NAME=gerritbot
DAEMON=/usr/local/gerrit/$NAME
DAEMON_ARGS="/home/gerrit2/gerritbot.config"
PIDFILE=/var/run/$NAME/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
USER=gerrit2

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions

pidof_gerritbot() {
    # If there is actually a gerritbot process whose pid is in PIDFILE,
    # print it and return 0.
    if [ -e "$PIDFILE" ]; then
        if ps -ef | grep gerrit[b]ot | grep python | awk '{print $2}' | grep -w $(cat $PIDFILE); then
            return 0
        fi
    fi
    return 1
}

#
# Function that starts the daemon/service
#
do_start()
{
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    mkdir -p /var/run/$NAME
    chown $USER /var/run/$NAME
    start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --pidfile $PIDFILE -c $USER --exec $DAEMON -- \
        $DAEMON_ARGS \
        || return 2
    # Add code here, if necessary, that waits for the process to be ready
    # to handle requests from services started subsequently which depend
    # on this one.  As a last resort, sleep for some time.
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --signal 9 --pidfile $PIDFILE
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    rm -f /var/run/$NAME/*
    return "$RETVAL"
}

#
# Function that sends a SIGHUP to the daemon/service
#
do_reload() {
    #
    # If the daemon can reload its configuration without
    # restarting (for example, when it is sent a SIGHUP),
    # then implement that here.
    #
    start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
    return 0
}

case "$1" in
    start)
        [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
        do_start
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
            2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
        esac
        ;;
    status)
        PID=$(pidof_gerritbot) || true
        if [ -n "$PID" ]; then
            log_daemon_msg "$DESC is running (pid $PID)."
            exit 0
        else
            log_daemon_msg "$DESC is NOT running."
            if [ -e "$PIDFILE" ]; then
                exit 1
            else
                exit 3
            fi
        fi
        ;;
    #reload|force-reload)
        #
        # If do_reload() is not implemented then leave this commented out
        # and leave 'force-reload' as an alias for 'restart'.
        #
        #log_daemon_msg "Reloading $DESC" "$NAME"
        #do_reload
        #log_end_msg $?
        #;;
    restart|force-reload)
        #
        # If the "reload" option is implemented then remove the
        # 'force-reload' alias
        #
        log_daemon_msg "Restarting $DESC" "$NAME"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;; # Old process is still running
                    *) log_end_msg 1 ;; # Failed to start
                esac
                ;;
            *)
                # Failed to stop
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
        echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
        exit 3
        ;;
esac

:
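
For reference, typical manual invocations of this init script (the script path matches SCRIPTNAME above):

sudo /etc/init.d/gerritbot start
sudo /etc/init.d/gerritbot status    # exits 0 if running, 1 if stale pidfile, 3 if stopped
sudo /etc/init.d/gerritbot restart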

View File

@ -1,56 +0,0 @@
openstack-infra:
  events:
    - patchset-created
    - change-merged
    - x-vrif-minus-2
  projects:
    - openstack/openstack-ci-puppet
    - openstack-ci/config
    - openstack-ci/devstack-gate
    - openstack-ci/gerrit
    - openstack-ci/gerrit-trigger-plugin
    - openstack-ci/gerrit-verification-status-plugin
    - openstack-ci/gerritbot
    - openstack-ci/gerritlib
    - openstack-ci/git-review
    - openstack-ci/jenkins-job-builder
    - openstack-ci/lodgeit
    - openstack-ci/meetbot
    - openstack-ci/pypi-mirror
    - openstack-ci/zuul
  branches:
    - master
openstack-dev:
  events:
    - change-merged
    - x-vrif-minus-2
  projects:
    - openstack/cinder
    - openstack/glance
    - openstack/horizon
    - openstack/keystone
    - openstack/nova
    - openstack/openstack-common
    - openstack/python-cinderclient
    - openstack/python-glanceclient
    - openstack/python-keystoneclient
    - openstack/python-novaclient
    - openstack/python-openstackclient
    - openstack/python-quantumclient
    - openstack/python-swiftclient
    - openstack/quantum
    - openstack/swift
  branches:
    - master
stackforge-dev:
  events:
    - patchset-created
    - change-merged
    - x-vrif-minus-2
  projects:
    - stackforge/ceilometer
    - heat-api/heat
  branches:
    - master
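
Tying this file back to the ChannelConfig class above, a sketch of how an event would be routed; the intersection of the three reverse maps presumably mirrors what the Gerrit handler (not shown in this diff) does:

config = ChannelConfig(yaml.load(open(
    'gerritbot_channel_config.yaml')))

# Channels interested in merges to openstack/nova on master:
channels = (config.events.get('change-merged', set()) &
            config.projects.get('openstack/nova', set()) &
            config.branches.get('master', set()))
print channels    # set(['#openstack-dev'])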

View File

@ -1,57 +0,0 @@
class gerritbot(
  $nick,
  $password,
  $server,
  $user,
  $vhost_name
) {
  file { '/usr/local/gerrit/gerritbot':
    owner   => 'root',
    group   => 'root',
    mode    => '0555',
    ensure  => 'present',
    source  => 'puppet:///modules/gerritbot/gerritbot',
    require => File['/usr/local/gerrit'],
  }

  file { '/etc/init.d/gerritbot':
    owner   => 'root',
    group   => 'root',
    mode    => '0555',
    ensure  => 'present',
    source  => 'puppet:///modules/gerritbot/gerritbot.init',
    require => File['/usr/local/gerrit/gerritbot'],
  }

  file { '/home/gerrit2/gerritbot_channel_config.yaml':
    owner   => 'root',
    group   => 'gerrit2',
    mode    => '0440',
    ensure  => 'present',
    source  => 'puppet:///modules/gerritbot/gerritbot_channel_config.yaml',
    replace => true,
    require => User['gerrit2'],
  }

  service { 'gerritbot':
    name       => 'gerritbot',
    ensure     => running,
    enable     => true,
    hasrestart => true,
    require    => File['/etc/init.d/gerritbot'],
    subscribe  => [File['/usr/local/gerrit/gerritbot'],
                   File['/home/gerrit2/gerritbot_channel_config.yaml']],
  }

  file { '/home/gerrit2/gerritbot.config':
    owner   => 'root',
    group   => 'gerrit2',
    mode    => '0440',
    ensure  => 'present',
    content => template('gerritbot/gerritbot.config.erb'),
    replace => true,
    require => User['gerrit2'],
  }
}
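
A declaration of this class from a site manifest might look like the following sketch; every value is a placeholder, and the hiera lookup simply reflects that secrets should not live in the manifest itself:

node 'review.example.org' {
  class { 'gerritbot':
    nick       => 'examplebot',
    password   => hiera('gerritbot_password'),
    server     => 'irc.freenode.net',
    user       => 'examplebot',
    vhost_name => 'review.example.org',
  }
}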

View File

@ -1,13 +0,0 @@
[ircbot]
nick=<%= nick %>
pass=<%= password %>
server=<%= server %>
port=6667
channel_config=/home/gerrit2/gerritbot_channel_config.yaml
lockfile=/var/run/gerritbot/gerritbot.pid
[gerrit]
user=<%= user %>
key=/home/gerrit2/.ssh/gerritbot_rsa
host=<%= vhost_name %>
port=29418

View File

@ -1,88 +0,0 @@
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Github pull requests closer reads a project config file called projects.yaml
# It should look like:
#
# - project: PROJECT_NAME
#   options:
#     - close-pull
#
# Github authentication information is read from github.secure.config,
# which should look like:
#
# [github]
# username = GITHUB_USERNAME
# password = GITHUB_PASSWORD
#
# or
#
# [github]
# oauth_token = GITHUB_OAUTH_TOKEN

import ConfigParser
import github
import os
import yaml

import logging
logging.basicConfig(level=logging.ERROR)

PROJECTS_YAML = os.environ.get('PROJECTS_YAML',
                               '/home/gerrit2/projects.yaml')
GITHUB_SECURE_CONFIG = os.environ.get('GITHUB_SECURE_CONFIG',
                                      '/home/gerrit2/github.secure.config')

MESSAGE = """Thank you for contributing to OpenStack!

%(project)s uses Gerrit for code review.

Please visit http://wiki.openstack.org/GerritWorkflow and follow the instructions there to upload your change to Gerrit.
"""

secure_config = ConfigParser.ConfigParser()
secure_config.read(GITHUB_SECURE_CONFIG)
config = yaml.load(open(PROJECTS_YAML))

if secure_config.has_option("github", "oauth_token"):
    ghub = github.Github(secure_config.get("github", "oauth_token"))
else:
    ghub = github.Github(secure_config.get("github", "username"),
                         secure_config.get("github", "password"))

orgs = ghub.get_user().get_orgs()
orgs_dict = dict(zip([o.login.lower() for o in orgs], orgs))

for section in config:
    project = section['project']

    # Make sure we're supposed to close pull requests for this project:
    if 'options' not in section or 'close-pull' not in section['options']:
        continue

    # Find the project's repo
    project_split = project.split('/', 1)
    if len(project_split) > 1:
        repo = orgs_dict[project_split[0].lower()].get_repo(project_split[1])
    else:
        repo = ghub.get_user().get_repo(project)

    # Close each pull request
    pull_requests = repo.get_pulls("open")
    for req in pull_requests:
        vars = dict(project=project)
        issue_data = {"url": repo.url + "/issues/" + str(req.number)}
        issue = github.Issue.Issue(req._requester, issue_data, completed=True)
        issue.create_comment(MESSAGE % vars)
        req.edit(state="closed")
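
Per the comment block at the top of the script, a concrete projects.yaml entry that opts a repository into pull-request closing would look like this (the project name is illustrative):

- project: openstack/example
  options:
    - close-pull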

View File

@ -1,74 +0,0 @@
class github (
  $username,
  $oauth_token,
  $projects = []
) {
  include pip

  package { 'PyGithub':
    ensure   => latest,  # okay to use latest for pip
    provider => pip,
    require  => Class[pip],
  }

  group { 'github':
    ensure => present,
  }

  user { 'github':
    ensure  => present,
    comment => 'Github API User',
    shell   => '/bin/bash',
    gid     => 'github',
    require => Group['github'],
  }

  file { '/etc/github':
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
    ensure => 'directory',
  }

  file { '/etc/github/github.config':
    ensure => absent,
  }

  file { '/etc/github/github.secure.config':
    owner   => 'root',
    group   => 'github',
    mode    => '0440',
    ensure  => 'present',
    content => template('github/github.secure.config.erb'),
    replace => true,
    require => [Group['github'], File['/etc/github']],
  }

  file { '/usr/local/github':
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
    ensure => 'directory',
  }

  file { '/usr/local/github/scripts':
    owner   => 'root',
    group   => 'root',
    mode    => '0755',
    ensure  => 'directory',
    recurse => true,
    require => File['/usr/local/github'],
    source  => [
      'puppet:///modules/github/scripts',
    ],
  }

  cron { 'githubclosepull':
    user    => github,
    minute  => '*/5',
    command => 'sleep $((RANDOM\%60+90)) && python /usr/local/github/scripts/close_pull_requests.py',
    require => File['/usr/local/github/scripts'],
  }
}
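
Declaring this class might look like the following sketch (placeholder credentials; note that $projects is accepted but never referenced in the body above, so it can be omitted). Incidentally, the backslash in the cron command escapes the % character, which crontab would otherwise treat as a line separator:

class { 'github':
  username    => 'example-ci-user',
  oauth_token => hiera('github_oauth_token'),
}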

View File

@ -1,3 +0,0 @@
[github]
username = <%= username %>
oauth_token = <%= oauth_token %>

Some files were not shown because too many files have changed in this diff