fix(proxy): fixes up proxy issues

This patch set fixes outstanding issues and eases the use of the AIO when
deployed behind a corporate proxy.

Signed-off-by: Tin Lam <tin@irrational.io>
Change-Id: Ia80cce18e6a7d3105bfed3ec423bbbd678dbe019
Tin Lam 2021-01-11 11:15:52 -06:00
parent 64974a8225
commit 05cf6bdec5
10 changed files with 145 additions and 14 deletions


@@ -28,8 +28,10 @@ author = 'Airship Charts Authors'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark'
'recommonmark',
'sphinx.ext.autosectionlabel'
]
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']


@@ -9,3 +9,4 @@ Contents:
introduction
requirements-and-host-config
vagrant
proxy


@@ -0,0 +1,46 @@
=====================================
Running Jarvis Behind Corporate Proxy
=====================================
Environment Variables
=====================
On the host machine, ensure the following environment variables are set with the appropriate proxy information:
``HTTP_PROXY``, ``HTTPS_PROXY``, and ``NO_PROXY``. You will also need to set the environment variable ``PRIVATE_NS``
to an IP address of a corporate name server that will resolve internal URLs.
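For illustration only, a host sitting behind a corporate proxy might export
something like the following (the proxy URL and name server IP are placeholders;
substitute your corporate values):
.. code:: bash
$ export HTTP_PROXY=http://proxy.example.com:8080
$ export HTTPS_PROXY=http://proxy.example.com:8080
$ export NO_PROXY=localhost,127.0.0.1
$ export PRIVATE_NS=10.0.0.53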
Vagrant Plugin
==============
To set up the Vagrant box's proxy settings easily, install the `vagrant_proxyconf`_ plugin by running:
.. code:: bash
$ vagrant plugin install vagrant-proxyconf
``NO_PROXY`` Configuration
==========================
In the event ``NO_PROXY`` is not specified, the following default value will be used:
.. code::
localhost,127.0.0.1,10.96.0.0/12,192.168.49.0/24,192.168.99.0/24,10.0.2.15,10.244.0.0/16,172.28.0.0/30,.minikube.internal,.svc,.svc.cluster.local,jarvis.local
Please note that the following need to be accounted for so that their traffic is not routed through the proxy (see the example after this list):
- Localhost: ``localhost`` and ``127.0.0.1``,
- Host and guest machine IP and name: ``jarvis``, ``jarvis.local``, etc.,
- Minikube-specific IP ranges (e.g. ``192.168.49.0/24``). See minikube's `documentation`_ for details,
- Minikube places ``host.minikube.internal`` and ``control-plane.minikube.internal`` into ``/etc/hosts``,
- Kubernetes service URLs ending in ``.svc``, ``.cluster.local`` or ``.svc.cluster.local``,
- Kubernetes service cluster IP ranges: ``10.96.0.0/12``, or whatever is configured via ``--service-cluster-ip-range``,
- DNSMasq subnet range: ``172.28.0.0/30``.
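For example, a site-specific internal domain can be appended on the host before
bringing the box up (``.corp.example.com`` below is a placeholder):
.. code:: bash
$ export NO_PROXY="${NO_PROXY},.corp.example.com,jarvis,jarvis.local"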
Installation
============
With the appropriate environment variables set, follow the instructions :ref:`here <aio-installation>`.
.. _vagrant_proxyconf: http://tmatilai.github.io/vagrant-proxyconf/
.. _documentation: https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/


@@ -2,4 +2,20 @@
Jarvis AIO Vagrant
==================
A vagrant file is provided under `tools/deployment/vagrant`, running `vagrant up` from this directory should bring up and deploy a copy of the Jarvis AIO.
.. _aio-installation:
Installation
============
A vagrant file is provided under ``tools/deployment/vagrant``; running
``vagrant up`` from this directory should bring up and deploy a copy of the
Jarvis AIO.
.. hint:: The recommended provider in the ``Vagrantfile`` is ``virtualbox``;
however, ``libvirt`` is also included.
.. note:: This document does not cover the installation of Vagrant.
Please refer to the instructions
`here <https://www.vagrantup.com/docs/installation>`_.
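For example, to use the ``libvirt`` provider instead of the default, Vagrant's
standard provider flag can be passed:
.. code:: bash
$ vagrant up --provider=libvirt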


@@ -4,6 +4,16 @@
Vagrant.configure("2") do |config|
config.vm.box = "generic/ubuntu2004"
if Vagrant.has_plugin?("vagrant-proxyconf")
config.proxy.http = ENV["HTTP_PROXY"]
config.proxy.https = ENV["HTTPS_PROXY"]
if ENV["NO_PROXY"].nil?
config.proxy.no_proxy = "localhost,127.0.0.1,10.96.0.0/12,192.168.49.0/24,192.168.99.0/24,10.0.2.15,10.244.0.0/16,172.28.0.0/30,.minikube.internal,.svc,.svc.cluster.local,jarvis.local"
else
config.proxy.no_proxy = ENV["NO_PROXY"]
end
end
config.vm.synced_folder "../../../", "/airship_charts"
config.vm.network "private_network", ip: "192.168.56.10"
@@ -19,9 +29,10 @@ Vagrant.configure("2") do |config|
vb.memory = 8192
end
config.vm.provision "shell", inline: <<-SHELL
config.vm.provision "shell", env: {"PRIVATE_NS" => ENV["PRIVATE_NS"]}, inline: <<-SHELL
set -ex
cd /airship_charts/
./tools/gate/jarvis/010-pre-setup.sh
./tools/gate/jarvis/050-setup-development-ca.sh
./tools/gate/jarvis/100-deploy-k8s.sh
./tools/gate/jarvis/200-deploy-support.sh
@@ -29,6 +40,5 @@ Vagrant.configure("2") do |config|
./tools/gate/jarvis/400-deploy-harbor.sh
./tools/gate/jarvis/500-deploy-gerrit.sh
./tools/gate/jarvis/600-deploy-tekton.sh
SHELL
end
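As a quick sanity check (not part of this patch), once the box is up one can
confirm that vagrant-proxyconf propagated the proxy settings into the guest,
for example:
$ vagrant ssh -c 'env | grep -i proxy'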


@@ -15,14 +15,16 @@ set -ex
export DEBCONF_NONINTERACTIVE_SEEN=true
export DEBIAN_FRONTEND=noninteractive
sudo swapoff -a
# Note: Including fix from https://review.opendev.org/c/openstack/openstack-helm-infra/+/763619/
echo "DefaultLimitMEMLOCK=16384" | sudo tee -a /etc/systemd/system.conf
sudo systemctl daemon-reexec
# Function to help generate a resolv.conf formatted file.
# Arguments are positional:
# 1st is location of file to be generated
# 2nd is a custom nameserver that should be used exclusively if available.
# 1st is location of file to be generated
# 2nd is a custom nameserver that should be used exclusively if available.
function generate_resolvconf() {
local target
target="${1}"
@@ -34,7 +36,7 @@ nameserver ${priority_nameserver}
EOF
fi
local nameservers_systemd
nameservers_systemd="$(awk '/^nameserver/ { print $2}' /run/systemd/resolve/resolv.conf | sed '/^127.0.0./d')"
nameservers_systemd="$(awk '/^nameserver/ { print $2 }' /run/systemd/resolve/resolv.conf | sed '/^127.0.0./d')"
if [[ ${nameservers_systemd} ]]; then
for nameserver in ${nameservers_systemd}; do
sudo -E tee --append "${target}" <<EOF
@@ -49,7 +51,7 @@ EOF
fi
if [[ ${priority_nameserver} ]]; then
sudo -E tee --append "${target}" <<EOF
options timeout:1 attempts:1'
options timeout:1 attempts:1
EOF
fi
}
@@ -73,6 +75,7 @@ sudo add-apt-repository \
docker_resolv="$(mktemp -d)/resolv.conf"
generate_resolvconf "${docker_resolv}"
docker_dns_list="$(awk '/^nameserver/ { printf "%s%s",sep,"\"" $NF "\""; sep=", "} END{print ""}' "${docker_resolv}")"
sudo -E mkdir -p /etc/docker
sudo -E tee /etc/docker/daemon.json <<EOF
{
@@ -87,6 +90,16 @@ sudo -E tee /etc/docker/daemon.json <<EOF
}
EOF
if [ -n "${HTTP_PROXY}" ]; then
sudo mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF | sudo -E tee /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=${HTTP_PROXY}"
Environment="HTTPS_PROXY=${HTTPS_PROXY}"
Environment="NO_PROXY=${NO_PROXY}"
EOF
fi
sudo -E apt-get update
sudo -E apt-get install -y \
docker-ce \
@@ -147,7 +160,7 @@ if [[ ${LOOPBACK_DOMAIN_TO_HOST} ]]; then
--bind-interfaces \
--address="/${LOOPBACK_DOMAIN_TO_HOST}/${host_ip}"
sudo tee /etc/kubernetes/kubelet_resolv.conf <<EOF
nameserver 172.28.0.2
nameserver 172.28.0.2
EOF
sudo rm -f /etc/resolv.conf
generate_resolvconf /etc/resolv.conf 172.28.0.2
@@ -172,7 +185,8 @@ sudo -E minikube start \
--extra-config=controller-manager.cluster-cidr=192.168.0.0/16 \
--extra-config=kube-proxy.mode=ipvs \
--extra-config=apiserver.service-node-port-range=1-65535 \
--extra-config=kubelet.resolv-conf=/etc/kubernetes/kubelet_resolv.conf
--extra-config=kubelet.resolv-conf=/etc/kubernetes/kubelet_resolv.conf \
--extra-config=kubelet.cgroup-driver=systemd
sudo -E systemctl enable --now kubelet
minikube addons list
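As an aside, once Docker is installed the effect of the proxy drop-in created
above can be checked with, for example:
$ sudo systemctl show --property=Environment docker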


@@ -0,0 +1,18 @@
#!/bin/bash
set -ex
# NOTE: Use this script to perform necessary actions prior to the start of the
# main deployment.
# Add the necessary corporate nameserver to systemd-resolved so that it
# propagates properly and is not overwritten.
# Set PRIVATE_NS to the IP of the corporate nameserver.
: "${HTTP_PROXY:=""}"
: "${PRIVATE_NS:=""}"
if [ -n "${PRIVATE_NS}" ]; then
sudo -E sed -i -e 's/^DNS=/#DNS=/' /etc/systemd/resolved.conf
sudo -E sed -i -e "/^\[Resolve\]$/a DNS=${PRIVATE_NS}" /etc/systemd/resolved.conf
sudo rm -f /etc/resolv.conf
sudo ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
sudo systemctl restart systemd-resolved
fi
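To illustrate the two sed calls above: with PRIVATE_NS set to, say, 10.0.0.53
(a placeholder), the [Resolve] section of /etc/systemd/resolved.conf would end
up looking roughly like:
[Resolve]
DNS=10.0.0.53
#DNS=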


@@ -5,8 +5,8 @@ for cfssl_bin in cfssl cfssljson; do
if ! type -p "${cfssl_bin}"; then
version=$(curl --silent "https://api.github.com/repos/cloudflare/cfssl/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')
version_number=${version#"v"}
sudo curl -L -o "/usr/local/bin/${cfssl_bin}" "https://github.com/cloudflare/cfssl/releases/download/${version}/${cfssl_bin}_${version_number}_linux_amd64"
sudo chmod +x "/usr/local/bin/${cfssl_bin}"
sudo -E curl -L -o "/usr/local/bin/${cfssl_bin}" "https://github.com/cloudflare/cfssl/releases/download/${version}/${cfssl_bin}_${version_number}_linux_amd64"
sudo -E chmod +x "/usr/local/bin/${cfssl_bin}"
ls "/usr/local/bin/${cfssl_bin}"
fi
done


@@ -12,9 +12,27 @@ for chart in tekton-pipelines tekton-triggers tekton-dashboard; do
$(./tools/deployment/common/get-values-overrides.sh "${chart}")
done
function get_yq() {
version=$(curl --silent "https://api.github.com/repos/mikefarah/yq/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')
sudo -E curl -L -o "/usr/local/bin/yq" "https://github.com/mikefarah/yq/releases/download/${version}/yq_linux_amd64"
sudo -E chmod +x "/usr/local/bin/yq"
ls "/usr/local/bin/yq"
}
./tools/deployment/common/wait-for-pods.sh tekton-pipelines
function validate() {
# if we are using the proxy we should place that into the template
if [ -n "${HTTP_PROXY}" ]; then
get_yq
# Note: This assumes syntax of yq >= 4.x
yq eval '(.spec.resourcetemplates[].spec.params[] | select(.name=="httpProxy")).value |= env(HTTP_PROXY)' -i ./tools/gate/jarvis/resources/tekton/yaml/triggertemplates/triggertemplate.yaml
yq eval '(.spec.resourcetemplates[].spec.params[] | select(.name=="httpsProxy")).value |= env(HTTPS_PROXY)' -i ./tools/gate/jarvis/resources/tekton/yaml/triggertemplates/triggertemplate.yaml
yq eval '(.spec.resourcetemplates[].spec.params[] | select(.name=="noProxy")).value |= env(NO_PROXY)' -i ./tools/gate/jarvis/resources/tekton/yaml/triggertemplates/triggertemplate.yaml
fi
kubectl -n tekton-pipelines apply -f ./tools/gate/jarvis/resources/tekton/yaml/role-resources/secret.yaml
kubectl -n tekton-pipelines apply -f ./tools/gate/jarvis/resources/tekton/yaml/role-resources/serviceaccount.yaml
kubectl -n tekton-pipelines apply -f ./tools/gate/jarvis/resources/tekton/yaml/role-resources/clustertriggerbinding-roles
@@ -88,4 +106,4 @@ EOF
kubectl -n tekton-pipelines get pipelinerun
}
validate
validate
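For context, the yq edits above overwrite the proxy parameters declared in the
trigger template shown in the next file; with HTTP_PROXY set to, for example,
http://proxy.example.com:8080 (a placeholder), the rendered params would look
roughly like:
- name: httpProxy
  value: "http://proxy.example.com:8080"
- name: httpsProxy
  value: "http://proxy.example.com:8080"
- name: noProxy
  value: "<contents of NO_PROXY>"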


@@ -24,4 +24,10 @@ spec:
- name: gitrevision
value: $(tt.params.gitrevision)
- name: message
value: $(tt.params.message)
value: $(tt.params.message)
- name: httpProxy
value: ""
- name: httpsProxy
value: ""
- name: noProxy
value: "172.28.0.2,localhost,127.0.0.1,10.96.0.0/12,192.168.49.0/24,192.168.99.0/24,107.124.202.156,10.0.2.15,10.244.0.0/16,.minikube.internal,.svc,.svc.cluster.local,jarvis.local,jarvis"