Adding fqdn

This commit is contained in:
Bilal Baqar 2016-03-12 12:38:52 -08:00
commit 6ba8a41d48
11 changed files with 237 additions and 78 deletions

View File

@ -7,7 +7,7 @@ virtualenv:
netaddr jinja2
lint: virtualenv
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402
@charm proof
unit_test: virtualenv

View File

@ -11,6 +11,24 @@ options:
type: string
default: 'juju-br0'
description: The interface connected to PLUMgrid Managment network.
os-data-network:
type: string
default:
description: |
The IP address and netmask of the OpenStack Data network (e.g.,
192.168.0.0/24)
.
This network will be used for tenant network traffic in overlay
networks.
fabric-interfaces:
default: 'MANAGEMENT'
type: string
description: |
Interfaces that will provide fabric connectivity on the director nodes.
Provided in form of json in a string. These interfaces have to be connected
to the os-data-network specified in the config. Default value is MANAGEMENT which
will configure the management interface as the fabric interface on each
director.
network-device-mtu:
type: string
default: '1580'

View File

@ -3,6 +3,10 @@
# This file contains the class that generates context
# for PLUMgrid template files.
import re
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.contrib.network.ip import get_address_in_network
from charmhelpers.core.hookenv import (
config,
unit_get,
@ -12,12 +16,10 @@ from charmhelpers.core.hookenv import (
related_units,
relation_get,
)
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.contrib.network.ip import get_address_in_network
import re
from socket import gethostname as get_unit_hostname
from socket import (
gethostname,
getfqdn
)
def _pg_dir_ips():
@ -71,6 +73,7 @@ class PGDirContext(context.NeutronContext):
pg_dir_ips = _pg_dir_ips()
pg_dir_ips.append(str(get_address_in_network(network=None,
fallback=get_host_ip(unit_get('private-address')))))
pg_dir_ips = sorted(pg_dir_ips)
pg_ctxt['director_ips'] = pg_dir_ips
pg_dir_ips_string = ''
single_ip = True
@ -82,10 +85,13 @@ class PGDirContext(context.NeutronContext):
pg_dir_ips_string = pg_dir_ips_string + ',' + str(ip)
pg_ctxt['director_ips_string'] = pg_dir_ips_string
pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip']
pg_ctxt['pg_hostname'] = "pg-director"
from pg_dir_utils import get_mgmt_interface
unit_hostname = gethostname()
pg_ctxt['pg_hostname'] = unit_hostname
pg_ctxt['pg_fqdn'] = getfqdn()
from pg_dir_utils import get_mgmt_interface, get_fabric_interface
pg_ctxt['interface'] = get_mgmt_interface()
pg_ctxt['label'] = get_unit_hostname()
pg_ctxt['fabric_interface'] = get_fabric_interface()
pg_ctxt['label'] = unit_hostname
pg_ctxt['fabric_mode'] = 'host'
virtual_ip_array = re.split('\.', conf['plumgrid-virtual-ip'])
pg_ctxt['virtual_router_id'] = virtual_ip_array[3]

View File

@ -5,8 +5,6 @@
# The hooks of this charm have been symlinked to functions
# in this file.
import sys
import time
from charmhelpers.core.hookenv import (
Hooks,
UnregisteredHookError,
@ -30,8 +28,13 @@ from pg_dir_utils import (
ensure_mtu,
add_lcm_key,
post_pg_license,
fabric_interface_changed,
load_iptables
)
import sys
import time
hooks = Hooks()
CONFIGS = register_configs()
@ -41,13 +44,14 @@ def install():
'''
Install hook is run when the charm is first deployed on a node.
'''
load_iptables()
configure_sources(update=True)
pkgs = determine_packages()
for pkg in pkgs:
apt_install(pkg, options=['--force-yes'], fatal=True)
load_iovisor()
ensure_mtu()
add_lcm_key()
CONFIGS.write_all()
@hooks.hook('director-relation-joined')
@ -65,12 +69,22 @@ def config_changed():
This hook is run when a config parameter is changed.
It also runs on node reboot.
'''
if post_pg_license():
log("PLUMgrid License Posted")
return 1
if add_lcm_key():
log("PLUMgrid LCM Key added")
return 1
charm_config = config()
if charm_config.changed('plumgrid-license-key'):
if post_pg_license():
log("PLUMgrid License Posted")
return 1
if charm_config.changed('fabric-interfaces'):
if not fabric_interface_changed():
log("Fabric interface already set")
return 1
if charm_config.changed('os-data-network'):
if charm_config['fabric-interfaces'] == 'MANAGEMENT':
log('Fabric running on managment network')
return 1
stop_pg()
configure_sources(update=True)
pkgs = determine_packages()
@ -98,6 +112,14 @@ def start():
time.sleep(15)
@hooks.hook('upgrade-charm')
def upgrade_charm():
    '''
    This hook is run when the charm is upgraded.

    Re-applies the PLUMgrid iptables rules so that units upgraded from a
    charm revision without firewall handling gain the required rules.
    '''
    load_iptables()
@hooks.hook('stop')
def stop():
'''

View File

@ -2,8 +2,18 @@
# This file contains functions used by the hooks to deploy PLUMgrid Director.
import pg_dir_context
import subprocess
import time
import os
import json
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
from copy import deepcopy
from charmhelpers.contrib.openstack import templating
from charmhelpers.core.host import set_nic_mtu
from collections import OrderedDict
from charmhelpers.contrib.storage.linux.ceph import modprobe
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
config,
@ -13,15 +23,14 @@ from charmhelpers.contrib.network.ip import (
get_iface_from_addr,
get_bridges,
get_bridge_nics,
is_ip
is_ip,
is_address_in_network,
get_iface_addr
)
from charmhelpers.fetch import (
apt_cache
apt_cache,
apt_install
)
from charmhelpers.contrib.openstack import templating
from charmhelpers.core.host import set_nic_mtu
from collections import OrderedDict
from charmhelpers.contrib.storage.linux.ceph import modprobe
from charmhelpers.contrib.openstack.utils import (
os_release,
)
@ -29,17 +38,11 @@ from charmhelpers.core.host import (
service_start,
service_stop,
)
import pg_dir_context
import subprocess
import time
import os
import json
LXC_CONF = '/etc/libvirt/lxc.conf'
TEMPLATES = 'templates/'
PG_LXC_DATA_PATH = '/var/lib/libvirt/filesystems/plumgrid-data'
PG_LXC_PATH = '/var/lib/libvirt/filesystems/plumgrid'
PG_CONF = '%s/conf/pg/plumgrid.conf' % PG_LXC_DATA_PATH
PG_KA_CONF = '%s/conf/etc/keepalived.conf' % PG_LXC_DATA_PATH
PG_DEF_CONF = '%s/conf/pg/nginx.conf' % PG_LXC_DATA_PATH
@ -49,7 +52,6 @@ PG_IFCS_CONF = '%s/conf/pg/ifcs.conf' % PG_LXC_DATA_PATH
AUTH_KEY_PATH = '%s/root/.ssh/authorized_keys' % PG_LXC_DATA_PATH
TEMP_LICENSE_FILE = '/tmp/license'
BASE_RESOURCE_MAP = OrderedDict([
(PG_KA_CONF, {
'services': ['plumgrid'],
@ -140,7 +142,6 @@ def restart_pg():
'''
service_stop('plumgrid')
time.sleep(2)
_exec_cmd(cmd=['iptables', '-F'])
service_start('plumgrid')
time.sleep(5)
@ -169,22 +170,23 @@ def remove_iovisor():
time.sleep(1)
def interface_exists(interface):
    '''
    Checks if interface exists on node.

    :param interface: name of the network interface to look up.
    :return: True if ``ip link show <interface>`` exits successfully,
             False if the command fails (interface not present).
    '''
    # Open /dev/null via a context manager so the file handle is closed
    # after the call; the previous code leaked one handle per invocation.
    with open(os.devnull, 'w') as devnull:
        try:
            subprocess.check_call(['ip', 'link', 'show', interface],
                                  stdout=devnull,
                                  stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            return False
    return True
def get_mgmt_interface():
'''
Returns the managment interface.
'''
def interface_exists(interface):
'''
Checks if interface exists on node.
'''
try:
subprocess.check_call(['ip', 'link', 'show', interface],
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return False
return True
mgmt_interface = config('mgmt-interface')
if interface_exists(mgmt_interface):
return mgmt_interface
@ -194,17 +196,65 @@ def get_mgmt_interface():
return get_iface_from_addr(unit_get('private-address'))
def fabric_interface_changed():
    '''
    Returns True if the fabric interface configured for this node no
    longer matches the one recorded in the ifcs configuration file
    (or the file cannot be read), False if it is unchanged.
    '''
    current_interface = get_fabric_interface()
    try:
        with open(PG_IFCS_CONF, 'r') as ifcs:
            recorded_lines = ifcs.readlines()
    except IOError:
        # No existing config to compare against; treat as changed.
        return True
    for entry in recorded_lines:
        if 'fabric_core' in entry and entry.split()[0] == current_interface:
            return False
    return True
def get_fabric_interface():
    '''
    Returns the fabric interface for this node.

    The 'fabric-interfaces' config option is either the literal string
    'MANAGEMENT' (use the management interface for fabric traffic) or a
    JSON object mapping hostnames -- with an optional 'DEFAULT' key --
    to interface names.

    :return: name of the interface to use for fabric connectivity.
    :raises ValueError: if the JSON is malformed, no interface is
        provided for this node, the interface does not exist, or its
        address is not within the configured os-data-network.
    '''
    fabric_interfaces = config('fabric-interfaces')
    if fabric_interfaces == 'MANAGEMENT':
        return get_mgmt_interface()
    try:
        all_fabric_interfaces = json.loads(fabric_interfaces)
    except ValueError:
        raise ValueError('Invalid json provided for fabric interfaces')
    # Per-host entry wins over the DEFAULT entry.
    hostname = get_unit_hostname()
    if hostname in all_fabric_interfaces:
        node_fabric_interface = all_fabric_interfaces[hostname]
    elif 'DEFAULT' in all_fabric_interfaces:
        node_fabric_interface = all_fabric_interfaces['DEFAULT']
    else:
        raise ValueError('No fabric interface provided for node')
    # Guard clauses replace the previous nested if/else; the trailing
    # 'return node_fabric_interface' in the old code was unreachable
    # (every branch returned or raised) and has been removed.
    if not interface_exists(node_fabric_interface):
        log('Provided fabric interface %s does not exist'
            % node_fabric_interface)
        raise ValueError('Provided fabric interface does not exist')
    if not is_address_in_network(config('os-data-network'),
                                 get_iface_addr(node_fabric_interface)[0]):
        raise ValueError('Fabric interface not in fabric network')
    return node_fabric_interface
def ensure_mtu():
'''
Ensures required MTU of the underlying networking of the node.
'''
interface_mtu = config('network-device-mtu')
mgmt_interface = get_mgmt_interface()
if mgmt_interface in get_bridges():
attached_interfaces = get_bridge_nics(mgmt_interface)
fabric_interface = get_fabric_interface()
if fabric_interface in get_bridges():
attached_interfaces = get_bridge_nics(fabric_interface)
for interface in attached_interfaces:
set_nic_mtu(interface, interface_mtu)
set_nic_mtu(mgmt_interface, interface_mtu)
set_nic_mtu(fabric_interface, interface_mtu)
def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs', fatal=False):
@ -275,8 +325,7 @@ def post_pg_license():
'plumgrid:plumgrid',
LICENSE_POST_PATH,
'-d',
json.dumps(license)
]
json.dumps(license)]
licence_get_cmd = [PG_CURL, '-u', 'plumgrid:plumgrid', LICENSE_GET_PATH]
try:
old_license = subprocess.check_output(licence_get_cmd)
@ -290,3 +339,55 @@ def post_pg_license():
log('No change in PLUMgrid License')
return 0
return 1
def load_iptables():
    '''
    Loads iptables rules to allow all PLUMgrid communication.
    '''
    # CIDR of the management network; None when it cannot be determined.
    network = get_cidr_from_iface(get_mgmt_interface())
    # NOTE(review): indentation reconstructed from a diff rendering --
    # all five rules appear to apply only when the management CIDR is
    # known; confirm against the charm source.
    if network:
        # Accept new TCP connections within the management network.
        _exec_cmd(['sudo', 'iptables', '-A', 'INPUT', '-p', 'tcp',
                   '-j', 'ACCEPT', '-s', network, '-d',
                   network, '-m', 'state', '--state', 'NEW'])
        # Accept new UDP connections within the management network.
        _exec_cmd(['sudo', 'iptables', '-A', 'INPUT', '-p', 'udp', '-j',
                   'ACCEPT', '-s', network, '-d', network,
                   '-m', 'state', '--state', 'NEW'])
        # Accept VRRP multicast (224.0.0.18) from the management network.
        _exec_cmd(['sudo', 'iptables', '-I', 'INPUT', '-s', network,
                   '-d', '224.0.0.18/32', '-j', 'ACCEPT'])
        # Accept the VRRP protocol itself (keepalived HA heartbeat).
        _exec_cmd(['sudo', 'iptables', '-I', 'INPUT', '-p', 'vrrp', '-j',
                   'ACCEPT'])
        # Accept new TCP connections to the PLUMgrid virtual IP.
        _exec_cmd(['sudo', 'iptables', '-A', 'INPUT', '-p', 'tcp', '-j',
                   'ACCEPT', '-d', config('plumgrid-virtual-ip'), '-m',
                   'state', '--state', 'NEW'])
    # Persist the rules across reboots.
    apt_install('iptables-persistent')
def get_cidr_from_iface(interface):
    '''
    Determines Network CIDR from interface.

    Installs and runs ohai to inspect the node's network configuration
    and returns the destination of the first route on the interface
    that carries a 'scope' attribute.

    :param interface: interface name; falsy values short-circuit.
    :return: CIDR string, or None when it cannot be determined.
    '''
    if not interface:
        return None
    apt_install('ohai')
    try:
        os_info = subprocess.check_output(['ohai', '-l', 'fatal'])
    except (OSError, subprocess.CalledProcessError):
        # OSError: ohai binary unavailable. CalledProcessError: ohai
        # exited non-zero -- previously uncaught and crashed the hook.
        log('Unable to get operating system information')
        return None
    try:
        os_info_json = json.loads(os_info)
    except ValueError:
        log('Unable to determine network')
        return None
    # Guard clauses replace the previous nested if/else pyramid.
    device = os_info_json['network']['interfaces'].get(interface)
    if device is None or not device.get('routes'):
        return None
    for net in device['routes']:
        if 'scope' in net:
            return net.get('destination')
    return None

1
hooks/upgrade-charm Symbolic link
View File

@ -0,0 +1 @@
pg_dir_hooks.py

View File

@ -1,5 +1,5 @@
127.0.0.1 localhost
127.0.1.1 {{ pg_hostname }}
127.0.1.1 {{ pg_fqdn }} {{ pg_hostname }}
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback

View File

@ -1,2 +1,2 @@
{{ interface }} = fabric_core host
{{ fabric_interface }} = fabric_core host

View File

@ -12,6 +12,10 @@ upstream pgCli {
server {{ virtual_ip }}:3000;
}
upstream pgMW {
server 127.0.0.1:4000;
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
@ -58,6 +62,19 @@ server {
proxy_set_header Host $host;
}
location /mwv0 {
proxy_pass http://pgMW;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
location /cloudApex/ {
index index.html;
}
location /vtap/ {
alias /opt/pg/vtap;
}

View File

@ -8,7 +8,8 @@ TO_PATCH = [
'config',
'unit_get',
'get_host_ip',
'get_unit_hostname',
'gethostname',
'getfqdn'
]
@ -46,11 +47,12 @@ class PGDirContextTest(CharmTestCase):
@patch.object(charmhelpers.contrib.openstack.context, 'unit_private_ip')
@patch.object(context, '_pg_dir_ips')
@patch.object(utils, 'get_mgmt_interface')
def test_neutroncc_context_api_rel(self, _mgmt_int, _pg_dir_ips,
_unit_priv_ip, _npa, _ens_pkgs,
_save_ff, _https, _is_clus,
_unit_get, _config, _runits, _rids,
_rget):
@patch.object(utils, 'get_fabric_interface')
def test_neutroncc_context_api_rel(self, _fabric_int, _mgmt_int,
_pg_dir_ips, _unit_priv_ip, _npa,
_ens_pkgs, _save_ff, _https,
_is_clus, _unit_get, _config,
_runits, _rids, _rget):
def mock_npa(plugin, section, manager):
if section == "driver":
return "neutron.randomdriver"
@ -70,10 +72,12 @@ class PGDirContextTest(CharmTestCase):
_npa.side_effect = mock_npa
_unit_get.return_value = '192.168.100.201'
_unit_priv_ip.return_value = '192.168.100.201'
self.get_unit_hostname.return_value = 'node0'
self.gethostname.return_value = 'node0'
self.getfqdn.return_value = 'node0.maas'
self.get_host_ip.return_value = '192.168.100.201'
_pg_dir_ips.return_value = ['192.168.100.202', '192.168.100.203']
_mgmt_int.return_value = 'juju-br0'
_fabric_int.return_value = 'juju-br0'
napi_ctxt = context.PGDirContext()
expect = {
'config': 'neutron.randomconfig',
@ -84,14 +88,16 @@ class PGDirContextTest(CharmTestCase):
'neutron_security_groups': None,
'neutron_url': 'https://None:9696',
'virtual_ip': '192.168.100.250',
'pg_hostname': 'pg-director',
'pg_hostname': 'node0',
'pg_fqdn': 'node0.maas',
'interface': 'juju-br0',
'fabric_interface': 'juju-br0',
'label': 'node0',
'fabric_mode': 'host',
'virtual_router_id': '250',
'director_ips': ['192.168.100.202', '192.168.100.203',
'192.168.100.201'],
'director_ips': ['192.168.100.201', '192.168.100.202',
'192.168.100.203'],
'director_ips_string':
'192.168.100.202,192.168.100.203,192.168.100.201',
'192.168.100.201,192.168.100.202,192.168.100.203',
}
self.assertEquals(expect, napi_ctxt())

View File

@ -1,5 +1,7 @@
from mock import MagicMock, patch, call
from test_utils import CharmTestCase
with patch('charmhelpers.core.hookenv.config') as config:
config.return_value = 'neutron'
import pg_dir_utils as utils
@ -29,7 +31,8 @@ TO_PATCH = [
'add_lcm_key',
'determine_packages',
'post_pg_license',
'config'
'config',
'load_iptables'
]
NEUTRON_CONF_DIR = "/etc/neutron"
@ -58,25 +61,10 @@ class PGDirHooksTests(CharmTestCase):
])
self.load_iovisor.assert_called_with()
self.ensure_mtu.assert_called_with()
self.add_lcm_key.assert_called_with()
def test_config_changed_hook(self):
_pkgs = ['plumgrid-lxc', 'iovisor-dkms']
self.add_lcm_key.return_value = 0
self.post_pg_license.return_value = 0
self.determine_packages.return_value = [_pkgs]
self.add_lcm_key.return_value = 1
self._call_hook('config-changed')
self.stop_pg.assert_called_with()
self.configure_sources.assert_called_with(update=True)
self.apt_install.assert_has_calls([
call(_pkgs, fatal=True,
options=['--force-yes']),
])
self.load_iovisor.assert_called_with()
self.ensure_mtu.assert_called_with()
self.CONFIGS.write_all.assert_called_with()
self.restart_pg.assert_called_with()
def test_start(self):
self._call_hook('start')