Remove contrib/bash-completion and demos
The demos are so old that they are not compatible with current Magnum, Tacker and Heat code, and we're not going to update them. The kolla-ansible bash completion is now irrelevant, because the client was rewritten in Python.

Change-Id: Idb2ad18eca72756571bbd854ad392c9999685703
parent f15c0d3d48
commit c1e566016d
@@ -1,21 +0,0 @@
_kolla_ansible() {
    local cur prev opts

    COMPREPLY=()

    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"
    kolla_ansible_opts="$(kolla-ansible bash-completion)"
    kolla_ansible_flags="$(echo ${kolla_ansible_opts} | sed 's/ [^-][a-z0-9_-]*//g' )"
    kolla_ansible_actions="$(echo ${kolla_ansible_opts} | sed 's/--[a-z0-9-]*//g' | sed 's/ -[a-z]//g' )"

    if [[ ${cur} == -* ]] ; then
        COMPREPLY=( $(compgen -W "${kolla_ansible_flags}" -- ${cur}) )
        return 0
    else
        COMPREPLY=( $(compgen -W "${kolla_ansible_actions}" -- ${cur}) )
        return 0
    fi
}

complete -F _kolla_ansible -A file kolla-ansible
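For reference, the removed completion script was meant to be sourced into an interactive shell; a minimal sketch of how it would have been enabled (the install path is an assumption, nothing in this repository ever installed it there):

::

    # Hypothetical location; sourcing registers the _kolla_ansible completer.
    source /etc/bash_completion.d/kolla-ansible
    # Tab-completion would then offer the actions and flags reported by
    # "kolla-ansible bash-completion".
    kolla-ansible <TAB><TAB>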
@@ -1,15 +0,0 @@
A Kolla Demo using Heat
=======================

By default, the launch script will spawn 3 Nova instances on a Neutron
network created from the tools/init-runonce script. Edit the VM\_COUNT
parameter in the launch script if you would like to spawn a different
number of Nova instances. Edit the IMAGE\_FLAVOR parameter if you would
like to launch images using a flavor other than m1.tiny.

Then run the script:

::

    $ ./launch
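Once ``./launch`` returns, the stack and its aggregated outputs can be inspected; a possible check, assuming the Heat plugin for python-openstackclient is installed (the stack name ``steak`` and the ``eth0``/``float`` outputs come from the launch script and templates below):

::

    $ openstack stack list
    $ openstack stack output show steak eth0
    $ openstack stack output show steak float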
@@ -1,18 +0,0 @@
#!/bin/bash

VM_COUNT=3
IMAGE_FLAVOR=m1.small
PUBLIC_NET_ID=$(openstack network show public1 | awk '/ id /{print $4}')
DEMO_NET_ID=$(openstack network show demo-net | awk '/ id /{print $4}')
DEMO_SUBNET_ID=$(openstack subnet show demo-subnet | awk '/ id /{print $4}')

echo "Public net id is $PUBLIC_NET_ID"
echo "Demo net id is $DEMO_NET_ID"
echo "Demo subnet id is $DEMO_SUBNET_ID"

openstack stack create --parameter vm_count=$VM_COUNT \
    --parameter image_flavor=$IMAGE_FLAVOR \
    --parameter public_net_id=$PUBLIC_NET_ID \
    --parameter demo_net_id=$DEMO_NET_ID \
    --parameter demo_subnet_id=$DEMO_SUBNET_ID \
    -t steak-rg.yaml steak
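The awk parsing of table output above is brittle; an equivalent but more robust way to capture the same IDs (not part of the original script) is the client's value formatter:

::

    PUBLIC_NET_ID=$(openstack network show public1 -f value -c id)
    DEMO_NET_ID=$(openstack network show demo-net -f value -c id)
    DEMO_SUBNET_ID=$(openstack subnet show demo-subnet -f value -c id)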
@@ -1,44 +0,0 @@
---
heat_template_version: 2013-05-23

parameters:
  public_net_id:
    type: string
    description: uuid of a network to use for floating ip addresses

  demo_net_id:
    type: string
    description: uuid of the network to use for creating ports

  demo_subnet_id:
    type: string
    description: uuid of a subnet on the fixed network to use for creating ports

  vm_count:
    type: string
    description: Number of VMs to launch

  image_flavor:
    type: string
    description: Image flavor to use when launching VMs

resources:
  steak:
    type: OS::Heat::ResourceGroup
    properties:
      count:
        get_param: vm_count
      resource_def:
        type: steak.yaml
        properties:
          image_flavor: {get_param: image_flavor}
          public_net_id: {get_param: public_net_id}
          demo_net_id: {get_param: demo_net_id}
          demo_subnet_id: {get_param: demo_subnet_id}

outputs:
  eth0:
    value: {get_attr: [steak, eth0]}

  float:
    value: {get_attr: [steak, float]}
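Either template can be sanity-checked against the Heat engine before launching; a possible check, assuming the Heat plugin for python-openstackclient is installed and both files sit in the current directory:

::

    $ openstack orchestration template validate -t steak.yaml
    $ openstack orchestration template validate -t steak-rg.yaml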
@@ -1,55 +0,0 @@
---
heat_template_version: 2013-05-23

parameters:
  public_net_id:
    type: string
    description: uuid of a network to use for floating ip addresses

  demo_net_id:
    type: string
    description: uuid of the network to use for creating ports

  demo_subnet_id:
    type: string
    description: uuid of a subnet on the fixed network to use for creating ports

  image_flavor:
    type: string
    description: Image flavor to use when launching VMs

resources:
  steak_node:
    type: "OS::Nova::Server"
    properties:
      key_name: mykey
      image: cirros
      flavor:
        get_param: image_flavor
      networks:
        - port:
            get_resource: steak_node_eth0

  steak_node_eth0:
    type: "OS::Neutron::Port"
    properties:
      network_id:
        get_param: demo_net_id
      fixed_ips:
        - subnet_id:
            get_param: demo_subnet_id

  steak_node_floating:
    type: "OS::Neutron::FloatingIP"
    properties:
      floating_network_id:
        get_param: public_net_id
      port_id:
        get_resource: steak_node_eth0

outputs:
  eth0:
    value: {get_attr: [steak_node_eth0, fixed_ips, 0, ip_address]}

  float:
    value: {get_attr: [steak_node_floating, floating_ip_address]}
@@ -1,5 +0,0 @@
magnum pod-create --manifest redis-kube/redis-master.yaml --bay testbay
magnum service-create --manifest redis-kube/redis-sentinel-service.yaml --bay testbay
magnum rc-create --manifest redis-kube/redis-controller.yaml --bay testbay
magnum rc-create --manifest redis-kube/redis-sentinel-controller.yaml --bay testbay
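These magnum pod-create/service-create/rc-create calls are one reason the demo no longer works: current Magnum only manages cluster lifecycles and no longer proxies Kubernetes resources. With a present-day cluster the same manifests would be applied directly with kubectl; a sketch, assuming kubectl is already configured against the cluster:

::

    kubectl create -f redis-kube/redis-master.yaml
    kubectl create -f redis-kube/redis-sentinel-service.yaml
    kubectl create -f redis-kube/redis-controller.yaml
    kubectl create -f redis-kube/redis-sentinel-controller.yaml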
@@ -1,197 +0,0 @@
Reliable, Scalable Redis on Kubernetes
--------------------------------------

The following document describes the deployment of a reliable,
multi-node Redis on Kubernetes. It deploys a master with replicated
slaves, as well as replicated redis sentinels which are used for health
checking and failover.

Prerequisites
~~~~~~~~~~~~~

This example assumes that you have a Kubernetes cluster installed and
running, and that you have installed the ``kubectl`` command line tool
somewhere in your path. Please see the
`getting started <https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/getting-started-guides>`__
guide for installation instructions for your platform.

A note for the impatient
~~~~~~~~~~~~~~~~~~~~~~~~

This is a somewhat long tutorial. If you want to jump straight to the
"do it now" commands, please see the `tl; dr <#tl-dr>`__ at the end.

Turning up an initial master/sentinel pod.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The first thing to create is a
`*Pod* <https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/pods.md>`__.
A Pod is one or more containers that *must* be scheduled onto the same
host. All containers in a pod share a network namespace, and may
optionally share mounted volumes.

We will use the shared network namespace to bootstrap our Redis
cluster. In particular, the very first sentinel needs to know how to
find the master (subsequent sentinels just ask the first sentinel).
Because all containers in a Pod share a network namespace, the sentinel
can simply look at ``$(hostname -i):6379``.

Here is the config for the initial master and sentinel pod:
`redis-master.yaml <redis-master.yaml>`__

Create this master as follows:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-master.yaml
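An optional sanity check, not part of the original walkthrough: list the pod and wait for it to reach the Running state before continuing.

.. code:: sh

    kubectl get pods redis-master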
Turning up a sentinel service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In Kubernetes a *Service* describes a set of Pods that perform the same
task. For example, the set of nodes in a Cassandra cluster, or even the
single node we created above. An important use for a Service is to
create a load balancer which distributes traffic across members of the
set. But a *Service* can also be used as a standing query which makes a
dynamically changing set of Pods (or the single Pod we've already
created) available via the Kubernetes API.

For Redis, we will use a Kubernetes Service to provide a discoverable
endpoint for the Redis sentinels in the cluster. From the sentinels,
Redis clients can find the master, and then the slaves and other
relevant info for the cluster. This enables new members to join the
cluster when failures occur.

Here is the definition of the sentinel service:
`redis-sentinel-service.yaml <redis-sentinel-service.yaml>`__

Create this service:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-sentinel-service.yaml
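Another optional check: list the service and the endpoints it currently selects.

.. code:: sh

    kubectl get services redis-sentinel
    kubectl get endpoints redis-sentinel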
Turning up replicated redis servers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

So far, what we have done is pretty manual, and not very fault-tolerant.
If the ``redis-master`` pod that we previously created is destroyed for
some reason (e.g. a machine dying) our Redis service goes away with it.

In Kubernetes a *Replication Controller* is responsible for replicating
sets of identical pods. Like a *Service* it has a selector query which
identifies the members of its set. Unlike a *Service* it also has a
desired number of replicas, and it will create or delete *Pods* to
ensure that the number of *Pods* matches up with its desired state.

Replication Controllers will "adopt" existing pods that match their
selector query, so let's create a Replication Controller with a single
replica to adopt our existing Redis server:
`redis-controller.yaml <redis-controller.yaml>`__

The bulk of this controller config is actually identical to the
redis-master pod definition above. It forms the template or "cookie
cutter" that defines what it means to be a member of this set.

Create this controller:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-controller.yaml

We'll do the same thing for the sentinel. Here is the controller config:
`redis-sentinel-controller.yaml <redis-sentinel-controller.yaml>`__

We create it as follows:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-sentinel-controller.yaml
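At this point both controllers exist but still manage a single replica each; an optional listing confirms that:

.. code:: sh

    kubectl get rc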
Resize our replicated pods
~~~~~~~~~~~~~~~~~~~~~~~~~~

Creating those controllers didn't actually change anything at first:
we only asked for one sentinel and one redis server, and they already
existed. Now we will add more replicas:

.. code:: sh

    kubectl resize rc redis --replicas=3

.. code:: sh

    kubectl resize rc redis-sentinel --replicas=3

This will create two additional replicas of the redis server and two
additional replicas of the redis sentinel.

Unlike our original redis-master pod, these pods exist independently,
and they use the ``redis-sentinel-service`` that we defined above to
discover and join the cluster.
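Note that ``kubectl resize`` only existed in kubectl versions of this demo's era; on anything recent the verb is ``scale``. The equivalent commands are shown here only as a pointer, since the rest of the demo still targets the long-gone v1beta3 API:

.. code:: sh

    kubectl scale rc redis --replicas=3
    kubectl scale rc redis-sentinel --replicas=3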
Delete our manual pod
~~~~~~~~~~~~~~~~~~~~~

The final step in the cluster turn-up is to delete the original
redis-master pod that we created manually. While it was useful for
bootstrapping discovery in the cluster, we really don't want the
lifespan of our sentinel to be tied to the lifespan of one of our redis
servers, and now that we have a successful, replicated redis sentinel
service up and running, the binding is unnecessary.

Delete the master as follows:

.. code:: sh

    kubectl delete pods redis-master

Now let's take a close look at what happens after this pod is deleted.
There are three things that happen:

1. The redis replication controller notices that its desired state is 3
   replicas, but there are currently only 2 replicas, and so it creates
   a new redis server to bring the replica count back up to 3.
2. The redis-sentinel replication controller likewise notices the
   missing sentinel, and also creates a new sentinel.
3. The redis sentinels themselves realize that the master has
   disappeared from the cluster, and begin the election procedure for
   selecting a new master. They perform this election and selection, and
   choose one of the existing redis server replicas to be the new master.
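If you want to watch the controllers re-create the missing pods, an optional watch can be left running in another terminal:

.. code:: sh

    kubectl get pods --watch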
Conclusion
~~~~~~~~~~

At this point we have a reliable, scalable Redis installation. By
resizing the replication controller for redis servers, we can increase
or decrease the number of read-slaves in our cluster. Likewise, if
failures occur, the redis-sentinels will perform master election and
select a new master.

tl; dr
~~~~~~

For those of you who are impatient, here is the summary of commands we
ran in this tutorial:

.. code:: sh

    # Create a bootstrap master
    kubectl create -f examples/redis/v1beta3/redis-master.yaml

    # Create a service to track the sentinels
    kubectl create -f examples/redis/v1beta3/redis-sentinel-service.yaml

    # Create a replication controller for redis servers
    kubectl create -f examples/redis/v1beta3/redis-controller.yaml

    # Create a replication controller for redis sentinels
    kubectl create -f examples/redis/v1beta3/redis-sentinel-controller.yaml

    # Resize both replication controllers
    kubectl resize rc redis --replicas=3
    kubectl resize rc redis-sentinel --replicas=3

    # Delete the original master pod
    kubectl delete pods redis-master
@@ -1,28 +0,0 @@
---
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: redis
spec:
  replicas: 2
  selector:
    name: redis
  template:
    metadata:
      labels:
        name: redis
    spec:
      containers:
        - name: redis
          image: kubernetes/redis:v1
          ports:
            - containerPort: 6379
          resources:
            limits:
              cpu: "1"
          volumeMounts:
            - mountPath: /redis-master-data
              name: data
      volumes:
        - name: data
          emptyDir: {}
@@ -1,34 +0,0 @@
---
apiVersion: v1beta3
kind: Pod
metadata:
  labels:
    name: redis
    redis-sentinel: "true"
    role: master
  name: redis-master
spec:
  containers:
    - name: master
      image: kubernetes/redis:v1
      env:
        - name: MASTER
          value: "true"
      ports:
        - containerPort: 6379
      resources:
        limits:
          cpu: "1"
      volumeMounts:
        - mountPath: /redis-master-data
          name: data
    - name: sentinel
      image: kubernetes/redis:v1
      env:
        - name: SENTINEL
          value: "true"
      ports:
        - containerPort: 26379
  volumes:
    - name: data
      emptyDir: {}
@@ -1,15 +0,0 @@
---
apiVersion: v1beta3
kind: Pod
metadata:
  labels:
    name: redis-proxy
    role: proxy
  name: redis-proxy
spec:
  containers:
    - name: proxy
      image: kubernetes/redis-proxy:v1
      ports:
        - containerPort: 6379
          name: api
@@ -1,24 +0,0 @@
---
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: redis-sentinel
spec:
  replicas: 2
  selector:
    redis-sentinel: "true"
  template:
    metadata:
      labels:
        name: redis-sentinel
        redis-sentinel: "true"
        role: sentinel
    spec:
      containers:
        - name: sentinel
          image: kubernetes/redis:v1
          env:
            - name: SENTINEL
              value: "true"
          ports:
            - containerPort: 26379
@@ -1,14 +0,0 @@
---
apiVersion: v1beta3
kind: Service
metadata:
  labels:
    name: sentinel
    role: service
  name: redis-sentinel
spec:
  ports:
    - port: 26379
      targetPort: 26379
  selector:
    redis-sentinel: "true"
@@ -1,34 +0,0 @@
#!/bin/bash

IMAGE_URL=https://fedorapeople.org/groups/magnum
IMAGE_NAME=fedora-21-atomic-6
IMAGE=${IMAGE_NAME}.qcow2
if ! [ -f "$IMAGE" ]; then
    echo "Downloading ${IMAGE_NAME} image"
    curl -L -o ./$IMAGE $IMAGE_URL/$IMAGE
fi

NIC_ID=$(openstack network show public1 | awk '/ id /{print $4}')

openstack image delete ${IMAGE_NAME} 2> /dev/null

echo "Loading ${IMAGE_NAME} image into glance"
openstack image create --public --disk-format qcow2 --container-format bare --file ./$IMAGE ${IMAGE_NAME}
GLANCE_IMAGE_ID=$(openstack image show ${IMAGE_NAME} | grep id | awk '{print $4}')

echo "Registering os-distro property with image"
openstack image set $GLANCE_IMAGE_ID --property os_distro=fedora-atomic

echo "Creating cluster-template"
magnum cluster-template-create \
    --name testclustertemplate \
    --image $GLANCE_IMAGE_ID \
    --keypair mykey \
    --fixed-network 10.0.3.0/24 \
    --external-network $NIC_ID \
    --tls-disabled \
    --dns-nameserver 8.8.8.8 --flavor m1.small \
    --docker-volume-size 5 --coe kubernetes

echo "Creating cluster"
magnum cluster-create --name testcluster --cluster-template testclustertemplate --node-count 2
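``magnum cluster-create`` returns as soon as the request is accepted; if a following step needs the cluster to be ready, a small polling loop in the spirit of the cleanup script below can be added (illustrative only; it assumes the usual status column in ``magnum cluster-list``):

::

    # Wait until the cluster reports CREATE_COMPLETE.
    while ! magnum cluster-list | grep testcluster | grep -q CREATE_COMPLETE; do
        sleep 10
    done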
@@ -1,9 +0,0 @@
#!/bin/bash

echo "Deleting cluster"
magnum cluster-delete testcluster
while magnum cluster-list | grep -q testcluster; do
    sleep 1
done
echo "Deleting cluster-template"
magnum cluster-template-delete testclustertemplate
@@ -1,20 +0,0 @@
A Kolla Demo using Tacker
=========================

By default, the deploy script will spawn 1 Nova instance on a Neutron
network created from the tools/init-runonce script.

Then run the deploy script:

::

    $ ./deploy-tacker-demo

After the demo is deployed, a cleanup script can be used to remove the
resources created by the deploy script.

To run the cleanup script:

::

    $ ./cleanup-tacker
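Once the deploy script finishes, the created resources can be inspected before running the cleanup. ``openstack vnf list`` is used by the scripts themselves; ``openstack vnf show`` is assumed to come from the same Tacker client plugin:

::

    $ openstack vnf list
    $ openstack vnf show kolla-sample-vnf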
@@ -1,20 +0,0 @@
#!/bin/bash
if [[ -f kolla-sample-vnffgd.yaml ]]; then
    echo "Deleting VNFFG"
    openstack vnf graph delete kolla-sample-vnffg
    echo "Deleting VNFFGD"
    openstack vnf graph descriptor delete kolla-sample-vnffgd
    echo "Deleting sample sfc instances"
    openstack server delete kolla_sfc_server kolla_sfc_client
fi
echo "Deleting sample VNF"
openstack vnf delete kolla-sample-vnf
while openstack vnf list | grep -q kolla-sample-vnf; do
    sleep 1
done
echo "Deleting sample VNFD"
openstack vnf descriptor delete kolla-sample-vnfd
echo "Deleting sample VIM"
openstack vim delete kolla-sample-vim
echo "Removing sample config"
rm -rf ./kolla-sample-*
@@ -1,73 +0,0 @@
#!/bin/bash

function gen_config {
    echo "Generating sample config"
    DEMO_NET=$(openstack network list | awk '/demo-net/ { print $2 }')
    IMAGE_ID=$(openstack image list | awk '/cirros/ { print $2 }')
    cat > ./kolla-sample-vim.yaml <<EOF
auth_url: ${OS_AUTH_URL}
username: ${OS_USERNAME}
password: ${OS_PASSWORD}
project_name: ${OS_PROJECT_NAME}
project_domain_name: ${OS_PROJECT_DOMAIN_NAME}
user_domain_name: ${OS_USER_DOMAIN_NAME}
EOF
    cat > ./kolla-sample-vnfd.yaml <<EOF
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0

description: Demo example

metadata:
  template_name: sample-tosca-vnfd

topology_template:
  node_templates:
    VDU1:
      type: tosca.nodes.nfv.VDU.Tacker
      capabilities:
        nfv_compute:
          properties:
            num_cpus: 1
            mem_size: 512 MB
            disk_size: 0 GB
      properties:
        image: ${IMAGE_ID}
        availability_zone: nova
        mgmt_driver: noop
        user_data_format: RAW
        user_data: |
          #!/bin/sh
          echo 1 > /proc/sys/net/ipv4/ip_forward

    CP11:
      type: tosca.nodes.nfv.CP.Tacker
      properties:
        management: true
        order: 0
        anti_spoofing_protection: false
      requirements:
        - virtualLink:
            node: VL1
        - virtualBinding:
            node: VDU1

    VL1:
      type: tosca.nodes.nfv.VL
      properties:
        network_name: ${DEMO_NET}
        vendor: Tacker
EOF
}

function deploy {
    echo "Registering sample VIM"
    openstack vim register --config-file ./kolla-sample-vim.yaml --description "kolla sample vim" --is-default kolla-sample-vim
    echo "Creating sample VNFD"
    openstack vnf descriptor create --vnfd-file ./kolla-sample-vnfd.yaml kolla-sample-vnfd
    echo "Creating sample VNF"
    VNFD_ID=$(openstack vnf descriptor list | awk '/kolla-sample-vnfd/ { print $2 }')
    openstack vnf create --vnfd-id ${VNFD_ID} kolla-sample-vnf
}

gen_config
deploy
@@ -1,83 +0,0 @@
#!/bin/bash

function create_servers {
    echo "Creating SFC demo instances"
    DEMO_NET=$(openstack network list | awk '/demo-net/ { print $2 }')
    IMAGE_ID=$(openstack image list | awk '/cirros/ { print $2 }')
    FLOATING_IP_CLIENT=$(openstack floating ip create public1 -c floating_ip_address -f value)
    FLOATING_IP_SERVER=$(openstack floating ip create public1 -c floating_ip_address -f value)
    openstack server create --wait --flavor m1.tiny --image $IMAGE_ID --nic net-id=$DEMO_NET kolla_sfc_server
    openstack server create --wait --flavor m1.tiny --image $IMAGE_ID --nic net-id=$DEMO_NET kolla_sfc_client
    openstack server add floating ip kolla_sfc_client $FLOATING_IP_CLIENT
    openstack server add floating ip kolla_sfc_server $FLOATING_IP_SERVER
    KOLLA_SFC_CLIENT_PORT=$(openstack port list --server kolla_sfc_client | awk '/ACTIVE/ {print $2}')
}

function sfc_gen_config {
    echo "Generating Tacker SFC config files"
    cat > ./kolla-sample-vnffgd.yaml <<EOF
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0

description: Sample VNFFG template

topology_template:
  description: Sample VNFFG template

  node_templates:

    Forwarding_path1:
      type: tosca.nodes.nfv.FP.TackerV2
      description: creates path (CP12->CP12)
      properties:
        id: 51
        policy:
          type: ACL
          criteria:
            - name: block_http
              classifier:
                network_src_port_id: ${KOLLA_SFC_CLIENT_PORT}
                network_id: ${DEMO_NET}
                ip_proto: 6
                destination_port_range: 80-80
        path:
          - forwarder: kolla-sample-vnfd
            capability: CP11

  groups:
    VNFFG1:
      type: tosca.groups.nfv.VNFFG
      description: HTTP to Corporate Net
      properties:
        vendor: tacker
        version: 1.0
        number_of_endpoints: 1
        dependent_virtual_link: [VL1]
        connection_point: [CP11]
        constituent_vnfs: [kolla-sample-vnfd]
        members: [Forwarding_path1]
EOF
}

function deploy_sfc {
    bash ./deploy-tacker-demo
    create_servers
    sfc_gen_config
    echo "Creating VNFFGD"
    openstack vnf graph descriptor create --vnffgd-file kolla-sample-vnffgd.yaml kolla-sample-vnffgd
    echo "Creating VNFFG"
    openstack vnf graph create --vnffgd-name kolla-sample-vnffgd kolla-sample-vnffg
    echo "Tacker sfc client floating ip address: $FLOATING_IP_CLIENT"
    echo "Tacker sfc server floating ip address: $FLOATING_IP_SERVER"
    cat << EOF

Done.

To create a simple HTTP server in the kolla_sfc_server instance run:

ssh cirros@$FLOATING_IP_SERVER 'while true; \\
do echo -e "HTTP/1.0 200 OK\r\n\r\nW00t from Kolla HTTP server!" | sudo nc -l -p 80 ; done &'

EOF
}

deploy_sfc
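To confirm that the forwarding graph was actually rendered into a port chain, the Tacker and networking-sfc resources can be listed. This is a hedged check: ``openstack vnf graph list`` is assumed by analogy with the create/delete calls above, and ``openstack sfc port chain list`` requires the networking-sfc client plugin:

::

    $ openstack vnf graph list
    $ openstack sfc port chain list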