Merge "Add jumphost configuration to ssh to VMs"
commit 59532f09fc
@@ -120,6 +120,15 @@ spec:
              type: object
            nodePort:
              type: integer
            nodeSSHPrivateKeys:
              description: NodeSSHPrivateKeys holds the name of a Secret
                in the same namespace as the SIPCluster CR, whose key values
                each represent an ssh private key that can be used to access
                the cluster nodes. They are mounted into the jumphost with
                the secret keys serving as file names relative to a common
                directory, and then configured as identity files in the
                SSH config file of the default user.
              type: string
            sshAuthorizedKeys:
              items:
                type: string
@@ -127,6 +136,7 @@ spec:
          required:
          - image
          - nodePort
          - nodeSSHPrivateKeys
          type: object
        type: array
      loadBalancer:
@@ -6,6 +6,12 @@ metadata:
  creationTimestamp: null
  name: manager-role
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
- apiGroups:
  - airship.airshipit.org
  resources:
@@ -1,7 +1,7 @@
apiVersion: airship.airshipit.org/v1
kind: SIPCluster
metadata:
  name: sipcluster-test
  name: sipcluster-system
  namespace: sipcluster-system
  finalizers:
  - sip.airship.airshipit.org/finalizer
@@ -43,6 +43,7 @@ spec:
sshAuthorizedKeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyaozS8kZRw2a1d0O4YXhxtJlDPThqIZilGCsXLbukIFOyMUmMTwQAtwWp5epwU1+5ponC2uBENB6xCCj3cl5Rd43d2/B6HxyAPQGKo6/zKYGAKW2nzYDxSWMl6NUSsiJAyXUA7ZlNZQe0m8PmaferlkQyLLZo3NJpizz6U6ZCtxvj43vEl7NYWnLUEIzGP9zMqltIGnD4vYrU9keVKKXSsp+DkApnbrDapeigeGATCammy2xRrUQDuOvGHsfnQbXr2j0onpTIh0PiLrXLQAPDg8UJRgVB+ThX+neI3rQ320djzRABckNeE6e4Kkwzn+QdZsmA2SDvM9IU7boK1jVQlgUPp7zF5q3hbb8Rx7AadyTarBayUkCgNlrMqth+tmTMWttMqCPxJRGnhhvesAHIl55a28Kzz/2Oqa3J9zwzbyDIwlEXho0eAq3YXEPeBhl34k+7gOt/5Zdbh+yacFoxDh0LrshQgboAijcVVaXPeN0LsHEiVvYIzugwIvCkoFMPWoPj/kEGzPY6FCkVneDA7VoLTCoG8dlrN08Lf05/BGC7Wllm66pTNZC/cKXP+cjpQn1iEuiuPxnPldlMHx9sx2y/BRoft6oT/GzqkNy1NTY/xI+MfmxXnF5kwSbcTbzZQ9fZ8xjh/vmpPBgDNrxOEAT4N6OG7GQIhb9HEhXQCQ== example-key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwpOyZjZ4gB0OTvmofH3llh6cBCWaEiEmHZWSkDXr8Bih6HcXVOtYMcFi/ZnUVGUBPw3ATNQBZUaVCYKeF+nDfKTJ9hmnlsyHxV2LeMsVg1o15Pb6f+QJuavEqtE6HI7mHyId4Z1quVTJXDWDW8OZEG7M3VktauqAn/e9UJvlL0bGmTFD1XkNcbRsWMRWkQgt2ozqlgrpPtvrg2/+bNucxX++VUjnsn+fGgAT07kbnrZwppGnAfjbYthxhv7GeSD0+Z0Lf1kiKy/bhUqXsZIuexOfF0YrRyUH1KBl8GCX2OLBYvXHyusByqsrOPiROqRdjX5PsK6HSAS0lk0niTt1p example-key-2
nodeSSHPrivateKeys: ssh-private-keys
loadBalancer:
- image: haproxy:2.3.2
  # NOTE: nodeLabels not yet implemented.
@@ -1,4 +1,5 @@
resources:
- airship_v1beta1_sipcluster.yaml
- bmh
- ssh_private_keys_secret.yaml
namespace: sipcluster-system
config/samples/ssh_private_keys_secret.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
apiVersion: v1
data:
  key: RFVNTVlfREFUQQ==
kind: Secret
metadata:
  name: ssh-private-keys
type: Opaque
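The key value RFVNTVlfREFUQQ== above is just base64 for DUMMY_DATA, so this sample carries placeholder data only. As a rough sketch (not part of the change), an equivalent Secret holding a real key could be built programmatically; the key name "node-key-1" and the key material are invented:

```go
// Sketch: build a node SSH private key Secret equivalent to the sample above.
// Assumes the usual k8s.io API modules are on the module path; names are illustrative.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	secret := &corev1.Secret{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
		ObjectMeta: metav1.ObjectMeta{Name: "ssh-private-keys"},
		Type:       corev1.SecretTypeOpaque,
		// Each key becomes a file name in the jumphost's private key directory;
		// Data values are base64-encoded automatically when serialized.
		Data: map[string][]byte{
			"node-key-1": []byte("-----BEGIN OPENSSH PRIVATE KEY-----\n...\n-----END OPENSSH PRIVATE KEY-----\n"),
		},
	}

	out, err := yaml.Marshal(secret)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```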
@@ -94,6 +94,20 @@ BMCOpts
<td>
</td>
</tr>
<tr>
<td>
<code>nodeSSHPrivateKeys</code><br>
<em>
string
</em>
</td>
<td>
<p>NodeSSHPrivateKeys holds the name of a Secret in the same namespace as the SIPCluster CR,
whose key values each represent an ssh private key that can be used to access the cluster nodes.
They are mounted into the jumphost with the secret keys serving as file names relative to a common
directory, and then configured as identity files in the SSH config file of the default user.</p>
</td>
</tr>
</tbody>
</table>
</div>
go.sum
@@ -822,25 +822,19 @@ k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlm
k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
k8s.io/apiserver v0.19.2 h1:xq2dXAzsAoHv7S4Xc/p7PKhiowdHV/PgdePWo3MxIYM=
k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.19.2 h1:7uaWJll6fyCPj2j3sfNN1AiY2gZU1VFN2dFR2uoxGWI=
k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs=
k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
@@ -858,17 +852,13 @@ k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20200821003339-5e75c0163111/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g=
k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E=
sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8=
sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
@@ -10,7 +10,21 @@ COPY ./certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates

RUN apt-get update
RUN apt-get install -y --no-install-recommends jq openssh-server python3-pip python3-setuptools
RUN apt-get install -y --no-install-recommends \
    bash-completion \
    jq \
    python3-pip \
    python3-setuptools \
    openssh-server \
    openssh-client

# uncomment (enable) bash completion config
RUN START=$(sed -n '/# enable bash completion in interactive shells/=' /etc/bash.bashrc) && \
    sed -i "$((START + 1)),$((START + 7))"' s/^##*//' /etc/bash.bashrc
# disable bash completion based on /etc/hosts, /etc/known_hosts, etc.
# so that only ssh config file entries are used
ENV COMP_KNOWN_HOSTS_WITH_HOSTFILE=


RUN pip3 install --upgrade pip
RUN pip3 config set global.cert /etc/ssl/certs/ca-certificates.crt
@@ -85,6 +85,11 @@ type JumpHostService struct {
SIPClusterService `json:",inline"`
BMC *BMCOpts `json:"bmc,omitempty"`
SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"`
// NodeSSHPrivateKeys holds the name of a Secret in the same namespace as the SIPCluster CR,
// whose key values each represent an ssh private key that can be used to access the cluster nodes.
// They are mounted into the jumphost with the secret keys serving as file names relative to a common
// directory, and then configured as identity files in the SSH config file of the default user.
NodeSSHPrivateKeys string `json:"nodeSSHPrivateKeys"`
}

// SIPClusterStatus defines the observed state of SIPCluster
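To make the comment above concrete, here is a small illustrative sketch (not part of the change): each key of the referenced Secret becomes a file under a common directory on the jumphost and is then listed as an IdentityFile entry. The key names are invented, and the directory is what mountPathNodeSSHPrivateKeys resolves to later in this diff:

```go
// Sketch: map Secret data keys to the identity files referenced in the
// jumphost user's SSH config. Key names here are made up for illustration.
package main

import "fmt"

func main() {
	// Mirrors mountPathData + "/" + nameNodeSSHPrivateKeysVolume from the
	// service constants introduced in this change.
	const mountPathNodeSSHPrivateKeys = "/etc/opt/sip/ssh-private-keys"

	// Stand-in for corev1.Secret.Data of the Secret named by NodeSSHPrivateKeys.
	secretData := map[string][]byte{
		"control-plane": []byte("<private key material>"),
		"worker":        []byte("<private key material>"),
	}

	for name := range secretData {
		// Each Secret key becomes one mounted file and one IdentityFile entry.
		fmt.Printf("IdentityFile %s/%s\n", mountPathNodeSSHPrivateKeys, name)
	}
}
```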
@@ -51,6 +51,7 @@ const (
// +kubebuilder:rbac:groups=airship.airshipit.org,resources=sipclusters/status,verbs=get;update;patch

// +kubebuilder:rbac:groups="metal3.io",resources=baremetalhosts,verbs=get;update;patch;list
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;

func (r *SIPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
r.NamespacedName = req.NamespacedName
@@ -41,7 +41,7 @@ const (
var _ = Describe("SIPCluster controller", func() {

AfterEach(func() {
opts := []client.DeleteAllOfOption{client.InNamespace("default")}
opts := []client.DeleteAllOfOption{client.InNamespace(testNamespace)}
Expect(k8sClient.DeleteAllOf(context.Background(), &metal3.BareMetalHost{}, opts...)).Should(Succeed())
Expect(k8sClient.DeleteAllOf(context.Background(), &airshipv1.SIPCluster{}, opts...)).Should(Succeed())
Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.Secret{}, opts...)).Should(Succeed())
@@ -71,7 +71,8 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test1"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 3, 4)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 3, 4)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs until SIP has scheduled them to the SIP cluster
@@ -107,7 +108,8 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test2"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 3, 4)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 3, 4)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs and validate they are not scheduled
@@ -153,7 +155,8 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test4"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 3, 4)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 3, 4)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs and validate they are not scheduled
@@ -215,7 +218,8 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test5"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 1, 2)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 1, 2)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs and validate they are not scheduled
@@ -276,7 +280,8 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test6"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 2, 1)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 2, 1)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs and validate they are not scheduled
@@ -336,7 +341,7 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test3"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 1, 2)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 1, 2)

controlPlaneSpec := sipCluster.Spec.Nodes[airshipv1.VMControlPlane]
controlPlaneSpec.Scheduling = airshipv1.RackAntiAffinity
@@ -346,6 +351,7 @@ var _ = Describe("SIPCluster controller", func() {
workerSpec.Scheduling = airshipv1.RackAntiAffinity
sipCluster.Spec.Nodes[airshipv1.VMWorker] = workerSpec

Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs and validate they are not scheduled
@@ -402,7 +408,7 @@ var _ = Describe("SIPCluster controller", func() {

// Create SIP cluster
name := "subcluster-test3"
sipCluster := testutil.CreateSIPCluster(name, testNamespace, 2, 1)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 2, 1)

controlPlaneSpec := sipCluster.Spec.Nodes[airshipv1.VMControlPlane]
controlPlaneSpec.Scheduling = airshipv1.RackAntiAffinity
@@ -412,6 +418,7 @@ var _ = Describe("SIPCluster controller", func() {
workerSpec.Scheduling = airshipv1.RackAntiAffinity
sipCluster.Spec.Nodes[airshipv1.VMWorker] = workerSpec

Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())

// Poll BMHs and validate they are not scheduled
@@ -25,6 +25,7 @@ import (
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
@@ -78,6 +79,9 @@ var _ = BeforeSuite(func(done Done) {
err = metal3.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

err = corev1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())

// +kubebuilder:scaffold:scheme

k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
@@ -15,8 +15,11 @@
package services

import (
"bytes"
"context"
"encoding/json"
"fmt"
"html/template"
"net/url"
"strings"

@@ -35,16 +38,23 @@ import (
const (
JumpHostServiceName = "jumphost"

mountPathData = "/etc/opt/sip"
mountPathScripts = "/opt/sip/bin"
subPathHosts = "hosts"
subPathSSHConfig = "ssh_config"

sshDir = "/home/ubuntu/.ssh"
authorizedKeysFile = "authorized_keys"
mountPathSSH = sshDir + "/" + authorizedKeysFile

nameAuthorizedKeysVolume = "authorized-keys"
nameHostsVolume = "hosts"
nameRebootVolume = "vm"
mountPathData = "/etc/opt/sip"
mountPathScripts = "/opt/sip/bin"
mountPathHosts = mountPathData + "/" + subPathHosts
mountPathSSHConfig = sshDir + "/config"
mountPathSSH = sshDir + "/" + authorizedKeysFile
mountPathNodeSSHPrivateKeys = mountPathData + "/" + nameNodeSSHPrivateKeysVolume

nameDataVolume = "data"
nameScriptsVolume = "scripts"
nameAuthorizedKeysVolume = "authorized-keys"
nameNodeSSHPrivateKeysVolume = "ssh-private-keys"
)

// JumpHost is an InfrastructureService that provides SSH and power-management capabilities for sub-clusters.
@@ -81,6 +91,8 @@ func (jh jumpHost) Deploy() error {
"app.kubernetes.io/instance": instance,
}

hostAliases := jh.generateHostAliases()

// TODO: Validate Service becomes ready.
service := jh.generateService(instance, labels)
jh.logger.Info("Applying service", "service", service.GetNamespace()+"/"+service.GetName())
@@ -90,8 +102,7 @@ func (jh jumpHost) Deploy() error {
return err
}

// TODO: Validate Secret becomes ready.
secret, err := jh.generateSecret(instance, labels)
secret, err := jh.generateSecret(instance, labels, hostAliases)
if err != nil {
return err
}
@@ -115,7 +126,7 @@ func (jh jumpHost) Deploy() error {
}

// TODO: Validate Deployment becomes ready.
deployment := jh.generateDeployment(instance, labels)
deployment := jh.generateDeployment(instance, labels, hostAliases)
jh.logger.Info("Applying deployment", "deployment", deployment.GetNamespace()+"/"+deployment.GetName())
err = applyRuntimeObject(client.ObjectKey{Name: deployment.GetName(), Namespace: deployment.GetNamespace()},
deployment, jh.client)
@@ -126,7 +137,8 @@ func (jh jumpHost) Deploy() error {
return nil
}

func (jh jumpHost) generateDeployment(instance string, labels map[string]string) *appsv1.Deployment {
func (jh jumpHost) generateDeployment(instance string, labels map[string]string,
hostAliases []corev1.HostAlias) *appsv1.Deployment {
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: instance,
@@ -166,19 +178,29 @@ func (jh jumpHost) generateDeployment(instance string, labels map[string]string)
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: nameDataVolume,
MountPath: mountPathHosts,
SubPath: subPathHosts,
},
{
Name: nameScriptsVolume,
MountPath: mountPathScripts,
},
{
Name: nameDataVolume,
MountPath: mountPathSSHConfig,
SubPath: subPathSSHConfig,
},
{
Name: nameNodeSSHPrivateKeysVolume,
MountPath: mountPathNodeSSHPrivateKeys,
},
{
Name: nameAuthorizedKeysVolume,
MountPath: mountPathSSH,
SubPath: authorizedKeysFile,
},
{
Name: nameHostsVolume,
MountPath: mountPathData,
},
{
Name: nameRebootVolume,
MountPath: mountPathScripts,
},
},
},
},
@@ -187,7 +209,7 @@ func (jh jumpHost) generateDeployment(instance string, labels map[string]string)
},
Volumes: []corev1.Volume{
{
Name: nameHostsVolume,
Name: nameDataVolume,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: instance,
@@ -213,24 +235,26 @@ func (jh jumpHost) generateDeployment(instance string, labels map[string]string)
},
},
{
Name: nameRebootVolume,
Name: nameScriptsVolume,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: instance,
},
DefaultMode: int32Ptr(0777),
Items: []corev1.KeyToPath{
{
Key: nameRebootVolume,
Path: nameRebootVolume,
},
},
},
},
},
{
Name: nameNodeSSHPrivateKeysVolume,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: jh.config.NodeSSHPrivateKeys,
},
},
},
},
HostAliases: jh.generateHostAliases(),
HostAliases: hostAliases,
},
},
},
@@ -281,18 +305,23 @@ func (jh jumpHost) generateConfigMap(instance string, labels map[string]string)
},
Data: map[string]string{
nameAuthorizedKeysVolume: strings.Join(jh.config.SSHAuthorizedKeys, "\n"),
nameRebootVolume: fmt.Sprintf(rebootScript, mountPathData, nameHostsVolume),
"vm": fmt.Sprintf(rebootScript, mountPathHosts),
},
}, nil
}

func (jh jumpHost) generateSecret(instance string, labels map[string]string) (*corev1.Secret, error) {
func (jh jumpHost) generateSecret(instance string, labels map[string]string, hostAliases []corev1.HostAlias) (
*corev1.Secret, error) {
hostData, err := generateHostList(*jh.machines)
if err != nil {
return nil, err
}
sshConfig, err := jh.generateSSHConfig(hostAliases)
if err != nil {
return nil, err
}

return &corev1.Secret{
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Secret",
@@ -303,11 +332,69 @@ func (jh jumpHost) generateSecret(instance string, labels map[string]string) (*c
Labels: labels,
},
Data: map[string][]byte{
nameHostsVolume: hostData,
subPathHosts: hostData,
subPathSSHConfig: sshConfig,
},
}, nil
}

return secret, nil
}

func (jh jumpHost) generateSSHConfig(hostAliases []corev1.HostAlias) ([]byte, error) {
key := types.NamespacedName{
Namespace: jh.sipName.Namespace,
Name: jh.config.NodeSSHPrivateKeys,
}
secret := &corev1.Secret{}
if err := jh.client.Get(context.Background(), key, secret); err != nil {
return nil, err
}

identityFiles := []string{}
for k := range secret.Data {
identityFiles = append(identityFiles, mountPathNodeSSHPrivateKeys+"/"+k)
}
hostNames := []string{}
for _, hostAlias := range hostAliases {
hostNames = append(hostNames, hostAlias.Hostnames[0])
}

tmpl, err := template.New("ssh-config").Parse(sshConfigTemplate)
if err != nil {
return nil, err
}

data := sshConfigTemplateData{
IdentityFiles: identityFiles,
HostNames: hostNames,
}

w := bytes.NewBuffer([]byte{})
if err := tmpl.Execute(w, data); err != nil {
return nil, err
}

rendered := w.Bytes()
return rendered, nil
}

type sshConfigTemplateData struct {
IdentityFiles []string
HostNames []string
}

const sshConfigTemplate = `
Host *
{{- range .IdentityFiles }}
IdentityFile {{ . }}
{{ end -}}

{{- range .HostNames }}
Host {{ . }}
HostName {{ . }}
{{ end -}}
`
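For reference, here is a minimal standalone sketch (not part of the change) of what a template shaped like sshConfigTemplate renders to. It uses text/template rather than the html/template import above, and the identity-file path and host names are invented examples:

```go
// Sketch: render an ssh_config in the same shape as sshConfigTemplate above.
package main

import (
	"os"
	"text/template"
)

const sshConfig = `
Host *
{{- range .IdentityFiles }}
  IdentityFile {{ . }}
{{ end -}}

{{- range .HostNames }}
Host {{ . }}
  HostName {{ . }}
{{ end -}}
`

func main() {
	tmpl := template.Must(template.New("ssh-config").Parse(sshConfig))
	data := struct {
		IdentityFiles []string
		HostNames     []string
	}{
		IdentityFiles: []string{"/etc/opt/sip/ssh-private-keys/key"},
		HostNames:     []string{"node01", "node02"},
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Printed output (roughly):
	//
	// Host *
	//   IdentityFile /etc/opt/sip/ssh-private-keys/key
	//
	// Host node01
	//   HostName node01
	//
	// Host node02
	//   HostName node02
}
```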
func (jh jumpHost) generateHostAliases() []corev1.HostAlias {
hostAliases := []corev1.HostAlias{}
for _, machine := range jh.machines.Machines {
@@ -410,7 +497,7 @@ var rebootScript = `#!/bin/sh
# Support Infrastructure Provider (SIP) VM Utility
# DO NOT MODIFY: generated by SIP

HOSTS_FILE="%s/%s"
HOSTS_FILE="%s"

LIST_COMMAND="list"
REBOOT_COMMAND="reboot"
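As a quick illustrative check (not part of the change), the old two-argument substitution and the new single-argument substitution yield the same HOSTS_FILE line, since mountPathHosts is mountPathData joined with the hosts sub-path:

```go
// Sketch comparing the old and new rebootScript substitutions.
package main

import "fmt"

func main() {
	const (
		mountPathData   = "/etc/opt/sip"
		nameHostsVolume = "hosts"
		subPathHosts    = "hosts"
		mountPathHosts  = mountPathData + "/" + subPathHosts
	)

	oldLine := fmt.Sprintf(`HOSTS_FILE="%s/%s"`, mountPathData, nameHostsVolume)
	newLine := fmt.Sprintf(`HOSTS_FILE="%s"`, mountPathHosts)

	fmt.Println(oldLine) // HOSTS_FILE="/etc/opt/sip/hosts"
	fmt.Println(newLine) // HOSTS_FILE="/etc/opt/sip/hosts"
}
```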
@@ -27,6 +27,9 @@ const (
var bmh1 *metal3.BareMetalHost
var bmh2 *metal3.BareMetalHost

var m1 *vbmh.Machine
var m2 *vbmh.Machine

// Re-declared from services package for testing purposes
type host struct {
Name string `json:"name"`
@@ -54,7 +57,7 @@ var _ = Describe("Service Set", func() {
bmh1.Spec.BMC.CredentialsName = bmcSecret.Name
bmh2.Spec.BMC.CredentialsName = bmcSecret.Name

m1 := &vbmh.Machine{
m1 = &vbmh.Machine{
BMH: *bmh1,
Data: &vbmh.MachineData{
IPOnInterface: map[string]string{
@@ -63,7 +66,7 @@ var _ = Describe("Service Set", func() {
},
}

m2 := &vbmh.Machine{
m2 = &vbmh.Machine{
BMH: *bmh2,
Data: &vbmh.MachineData{
IPOnInterface: map[string]string{
@@ -91,8 +94,16 @@ var _ = Describe("Service Set", func() {
It("Deploys services", func() {
By("Getting machine IPs and creating secrets, pods, and nodeport service")

sip := testutil.CreateSIPCluster("default", "default", 1, 1)
set := services.NewServiceSet(logger, *sip, machineList, k8sClient)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("default", "default", 1, 1)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
machineList = &vbmh.MachineList{
Machines: map[string]*vbmh.Machine{
bmh1.GetName(): m1,
bmh2.GetName(): m2,
},
}

set := services.NewServiceSet(logger, *sipCluster, machineList, k8sClient)

serviceList, err := set.ServiceList()
Expect(serviceList).To(HaveLen(2))
@@ -103,12 +114,12 @@ var _ = Describe("Service Set", func() {
}

Eventually(func() error {
return testDeployment(sip, *machineList)
return testDeployment(sipCluster, *machineList)
}, 5, 1).Should(Succeed())
})

It("Does not deploy a Jump Host when an invalid SSH key is provided", func() {
sip := testutil.CreateSIPCluster("default", "default", 1, 1)
sip, _ := testutil.CreateSIPCluster("default", "default", 1, 1)
sip.Spec.Services.Auth = []airshipv1.SIPClusterService{}
sip.Spec.Services.LoadBalancer = []airshipv1.SIPClusterService{}
sip.Spec.Services.JumpHost[0].SSHAuthorizedKeys = []string{
@@ -124,7 +124,7 @@ var _ = Describe("MachineList", func() {
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}

sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.Services = airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
@@ -137,6 +137,7 @@ var _ = Describe("MachineList", func() {
},
},
}
objsToApply = append(objsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objsToApply...)
Expect(ml.ExtrapolateServiceAddresses(*sipCluster, k8sClient)).To(BeNil())

@@ -174,7 +175,7 @@ var _ = Describe("MachineList", func() {
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}

sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.Services = airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
@@ -187,6 +188,7 @@ var _ = Describe("MachineList", func() {
},
},
}
objsToApply = append(objsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objsToApply...)
Expect(ml.ExtrapolateBMCAuth(*sipCluster, k8sClient)).To(BeNil())

@@ -222,7 +224,7 @@ var _ = Describe("MachineList", func() {
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}

sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.Services = airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
@@ -235,6 +237,7 @@ var _ = Describe("MachineList", func() {
},
},
}
objsToApply = append(objsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objsToApply...)
Expect(ml.ExtrapolateBMCAuth(*sipCluster, k8sClient)).ToNot(BeNil())
})
@@ -274,7 +277,7 @@ var _ = Describe("MachineList", func() {
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}

sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.Services = airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
@@ -287,6 +290,7 @@ var _ = Describe("MachineList", func() {
},
},
}
objsToApply = append(objsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objsToApply...)
Expect(ml.ExtrapolateBMCAuth(*sipCluster, k8sClient)).ToNot(BeNil())
})
@@ -320,7 +324,7 @@ var _ = Describe("MachineList", func() {
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}

sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.Services = airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
@@ -333,6 +337,7 @@ var _ = Describe("MachineList", func() {
},
},
}
objsToApply = append(objsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objsToApply...)
Expect(ml.ExtrapolateServiceAddresses(*sipCluster, k8sClient)).ToNot(BeNil())
})
@@ -365,7 +370,7 @@ var _ = Describe("MachineList", func() {
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}

sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster.Spec.Services = airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
@@ -378,22 +383,24 @@ var _ = Describe("MachineList", func() {
},
},
}
objsToApply = append(objsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objsToApply...)
Expect(ml.ExtrapolateServiceAddresses(*sipCluster, k8sClient)).ToNot(BeNil())
})

It("Should not retrieve the BMH IP if it has been previously extrapolated", func() {
// Store an IP address for each machine
var objs []runtime.Object
var objectsToApply []runtime.Object
for _, machine := range machineList.Machines {
machine.Data.IPOnInterface = map[string]string{
"oam-ipv4": "32.68.51.139",
}
objs = append(objs, &machine.BMH)
objectsToApply = append(objectsToApply, &machine.BMH)
}

k8sClient := mockClient.NewFakeClient(objs...)
sipCluster := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("subcluster-1", "default", 1, 3)
objectsToApply = append(objectsToApply, nodeSSHPrivateKeys)
k8sClient := mockClient.NewFakeClient(objectsToApply...)
Expect(machineList.ExtrapolateServiceAddresses(*sipCluster, k8sClient)).To(BeNil())
})

@@ -23,6 +23,8 @@ const (

VinoFlavorLabel = "vino.airshipit.org/flavor"

sshPrivateKeyBase64 = "DUMMY_DATA"

networkDataContent = `
{
"links": [
@@ -207,59 +209,72 @@ func CreateBMH(node int, namespace string, role airshipv1.VMRole, rack int) (*me
}

// CreateSIPCluster initializes a SIPCluster with specific parameters for use in test cases.
func CreateSIPCluster(name string, namespace string, controlPlanes int, workers int) *airshipv1.SIPCluster {
func CreateSIPCluster(name string, namespace string, controlPlanes int, workers int) (
*airshipv1.SIPCluster, *corev1.Secret) {
sshPrivateKeySecretName := fmt.Sprintf("%s-ssh-private-key", name)
return &airshipv1.SIPCluster{
TypeMeta: metav1.TypeMeta{
Kind: "SIPCluster",
APIVersion: "airship.airshipit.org/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: airshipv1.SIPClusterSpec{
Nodes: map[airshipv1.VMRole]airshipv1.NodeSet{
airshipv1.VMControlPlane: {
VMFlavor: "vino.airshipit.org/flavor=" + vinoFlavorMap[airshipv1.VMControlPlane],
Scheduling: airshipv1.HostAntiAffinity,
Count: &airshipv1.VMCount{
Active: controlPlanes,
Standby: 0,
},
},
airshipv1.VMWorker: {
VMFlavor: "vino.airshipit.org/flavor=" + vinoFlavorMap[airshipv1.VMWorker],
Scheduling: airshipv1.HostAntiAffinity,
Count: &airshipv1.VMCount{
Active: workers,
Standby: 0,
},
},
TypeMeta: metav1.TypeMeta{
Kind: "SIPCluster",
APIVersion: "airship.airshipit.org/v1",
},
Services: airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
NodeInterface: "eno3",
NodePort: 30000,
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: airshipv1.SIPClusterSpec{
Nodes: map[airshipv1.VMRole]airshipv1.NodeSet{
airshipv1.VMControlPlane: {
VMFlavor: "vino.airshipit.org/flavor=" + vinoFlavorMap[airshipv1.VMControlPlane],
Scheduling: airshipv1.HostAntiAffinity,
Count: &airshipv1.VMCount{
Active: controlPlanes,
Standby: 0,
},
},
airshipv1.VMWorker: {
VMFlavor: "vino.airshipit.org/flavor=" + vinoFlavorMap[airshipv1.VMWorker],
Scheduling: airshipv1.HostAntiAffinity,
Count: &airshipv1.VMCount{
Active: workers,
Standby: 0,
},
},
},
JumpHost: []airshipv1.JumpHostService{
{
SIPClusterService: airshipv1.SIPClusterService{
Image: "quay.io/airshipit/jump-host",
NodePort: 30001,
Services: airshipv1.SIPClusterServices{
LoadBalancer: []airshipv1.SIPClusterService{
{
NodeInterface: "eno3",
NodePort: 30000,
},
SSHAuthorizedKeys: []string{
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyaozS8kZRw2a1d0O4YXhxtJlDPThqIZilGCsXLbukIFOyMUmMTwQAtwWp5epwU1+5ponC2uBENB6xCCj3cl5Rd43d2/B6HxyAPQGKo6/zKYGAKW2nzYDxSWMl6NUSsiJAyXUA7ZlNZQe0m8PmaferlkQyLLZo3NJpizz6U6ZCtxvj43vEl7NYWnLUEIzGP9zMqltIGnD4vYrU9keVKKXSsp+DkApnbrDapeigeGATCammy2xRrUQDuOvGHsfnQbXr2j0onpTIh0PiLrXLQAPDg8UJRgVB+ThX+neI3rQ320djzRABckNeE6e4Kkwzn+QdZsmA2SDvM9IU7boK1jVQlgUPp7zF5q3hbb8Rx7AadyTarBayUkCgNlrMqth+tmTMWttMqCPxJRGnhhvesAHIl55a28Kzz/2Oqa3J9zwzbyDIwlEXho0eAq3YXEPeBhl34k+7gOt/5Zdbh+yacFoxDh0LrshQgboAijcVVaXPeN0LsHEiVvYIzugwIvCkoFMPWoPj/kEGzPY6FCkVneDA7VoLTCoG8dlrN08Lf05/BGC7Wllm66pTNZC/cKXP+cjpQn1iEuiuPxnPldlMHx9sx2y/BRoft6oT/GzqkNy1NTY/xI+MfmxXnF5kwSbcTbzZQ9fZ8xjh/vmpPBgDNrxOEAT4N6OG7GQIhb9HEhXQCQ== example-key", //nolint
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwpOyZjZ4gB0OTvmofH3llh6cBCWaEiEmHZWSkDXr8Bih6HcXVOtYMcFi/ZnUVGUBPw3ATNQBZUaVCYKeF+nDfKTJ9hmnlsyHxV2LeMsVg1o15Pb6f+QJuavEqtE6HI7mHyId4Z1quVTJXDWDW8OZEG7M3VktauqAn/e9UJvlL0bGmTFD1XkNcbRsWMRWkQgt2ozqlgrpPtvrg2/+bNucxX++VUjnsn+fGgAT07kbnrZwppGnAfjbYthxhv7GeSD0+Z0Lf1kiKy/bhUqXsZIuexOfF0YrRyUH1KBl8GCX2OLBYvXHyusByqsrOPiROqRdjX5PsK6HSAS0lk0niTt1p example-key-2", // nolint
},
JumpHost: []airshipv1.JumpHostService{
{
SIPClusterService: airshipv1.SIPClusterService{
Image: "quay.io/airshipit/jump-host",
NodePort: 30001,
NodeInterface: "eno3",
},
SSHAuthorizedKeys: []string{
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCyaozS8kZRw2a1d0O4YXhxtJlDPThqIZilGCsXLbukIFOyMUmMTwQAtwWp5epwU1+5ponC2uBENB6xCCj3cl5Rd43d2/B6HxyAPQGKo6/zKYGAKW2nzYDxSWMl6NUSsiJAyXUA7ZlNZQe0m8PmaferlkQyLLZo3NJpizz6U6ZCtxvj43vEl7NYWnLUEIzGP9zMqltIGnD4vYrU9keVKKXSsp+DkApnbrDapeigeGATCammy2xRrUQDuOvGHsfnQbXr2j0onpTIh0PiLrXLQAPDg8UJRgVB+ThX+neI3rQ320djzRABckNeE6e4Kkwzn+QdZsmA2SDvM9IU7boK1jVQlgUPp7zF5q3hbb8Rx7AadyTarBayUkCgNlrMqth+tmTMWttMqCPxJRGnhhvesAHIl55a28Kzz/2Oqa3J9zwzbyDIwlEXho0eAq3YXEPeBhl34k+7gOt/5Zdbh+yacFoxDh0LrshQgboAijcVVaXPeN0LsHEiVvYIzugwIvCkoFMPWoPj/kEGzPY6FCkVneDA7VoLTCoG8dlrN08Lf05/BGC7Wllm66pTNZC/cKXP+cjpQn1iEuiuPxnPldlMHx9sx2y/BRoft6oT/GzqkNy1NTY/xI+MfmxXnF5kwSbcTbzZQ9fZ8xjh/vmpPBgDNrxOEAT4N6OG7GQIhb9HEhXQCQ== example-key", //nolint
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwpOyZjZ4gB0OTvmofH3llh6cBCWaEiEmHZWSkDXr8Bih6HcXVOtYMcFi/ZnUVGUBPw3ATNQBZUaVCYKeF+nDfKTJ9hmnlsyHxV2LeMsVg1o15Pb6f+QJuavEqtE6HI7mHyId4Z1quVTJXDWDW8OZEG7M3VktauqAn/e9UJvlL0bGmTFD1XkNcbRsWMRWkQgt2ozqlgrpPtvrg2/+bNucxX++VUjnsn+fGgAT07kbnrZwppGnAfjbYthxhv7GeSD0+Z0Lf1kiKy/bhUqXsZIuexOfF0YrRyUH1KBl8GCX2OLBYvXHyusByqsrOPiROqRdjX5PsK6HSAS0lk0niTt1p example-key-2", // nolint
},
NodeSSHPrivateKeys: sshPrivateKeySecretName,
},
},
},
},
Status: airshipv1.SIPClusterStatus{},
},
Status: airshipv1.SIPClusterStatus{},
}
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: sshPrivateKeySecretName,
Namespace: namespace,
},
Data: map[string][]byte{
"key": []byte(sshPrivateKeyBase64),
},
Type: corev1.SecretTypeOpaque,
}
}

// CreateBMCAuthSecret creates a K8s Secret that matches the Metal3.io BaremetalHost credential format for use in test