Add backend for GlusterFS, Nexenta, SAN, and SolidFire

Some drivers could not be used in multi-backend mode. This
commit adds a backend defined type for these drivers and makes
use of it in their respective cinder::volume::* classes.
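
For illustration, the new defines make it possible to declare several
backends side by side. The backend titles and the enabled_backends
setting below are an assumed deployment sketch, not part of this
change:

    cinder::backend::glusterfs { 'gluster1':
      glusterfs_shares => ['192.168.1.1:/volumes'],
    }
    cinder::backend::solidfire { 'solidfire1':
      san_ip       => '10.0.0.30',
      san_login    => 'admin',
      san_password => 'secret',
    }
    cinder_config {
      'DEFAULT/enabled_backends': value => 'gluster1,solidfire1';
    }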

Change-Id: I2939610cfc078d6ec6b85e5516138620b106237f
Sylvain Baubeau 2014-03-27 02:48:21 +01:00
parent 67e6bfad77
commit de0a1263a4
12 changed files with 476 additions and 51 deletions

@@ -0,0 +1,66 @@
#
# == Define: cinder::backend::glusterfs
#
# Configures Cinder to use GlusterFS as a volume driver
#
# === Parameters
#
# [*glusterfs_shares*]
# (required) An array of GlusterFS volume locations.
# Must be an array even if there is only one volume.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*glusterfs_disk_util*]
# (optional) Removed in Icehouse. Setting this parameter raises an error.
#
# [*glusterfs_sparsed_volumes*]
# (optional) Whether or not to use sparse (thin) volumes.
# Defaults to undef which uses the driver's default of "true".
#
# [*glusterfs_mount_point_base*]
# (optional) Where to mount the Gluster volumes.
# Defaults to undef which uses the driver's default of "$state_path/mnt".
#
# [*glusterfs_shares_config*]
# (optional) The config file to store the given $glusterfs_shares.
# Defaults to '/etc/cinder/shares.conf'
#
# === Examples
#
# cinder::backend::glusterfs { 'myGluster':
#   glusterfs_shares => ['192.168.1.1:/volumes'],
# }
#
define cinder::backend::glusterfs (
$glusterfs_shares,
$volume_backend_name = $name,
$glusterfs_disk_util = false,
$glusterfs_sparsed_volumes = undef,
$glusterfs_mount_point_base = undef,
$glusterfs_shares_config = '/etc/cinder/shares.conf'
) {
if $glusterfs_disk_util {
fail('glusterfs_disk_util is removed in Icehouse.')
}
$content = join($glusterfs_shares, "\n")
file { $glusterfs_shares_config:
content => "${content}\n",
require => Package['cinder'],
notify => Service['cinder-volume']
}
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value =>
'cinder.volume.drivers.glusterfs.GlusterfsDriver';
"${name}/glusterfs_shares_config": value => $glusterfs_shares_config;
"${name}/glusterfs_sparsed_volumes": value => $glusterfs_sparsed_volumes;
"${name}/glusterfs_mount_point_base": value => $glusterfs_mount_point_base;
}
}

@@ -0,0 +1,59 @@
# == Define: cinder::backend::nexenta
#
# Sets up Cinder with the Nexenta volume driver.
#
# === Parameters
#
# [*nexenta_user*]
# (required) User name to connect to Nexenta SA.
#
# [*nexenta_password*]
# (required) Password to connect to Nexenta SA.
#
# [*nexenta_host*]
# (required) IP address of Nexenta SA.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*nexenta_volume*]
# (optional) Pool on SA that will hold all volumes. Defaults to 'cinder'.
#
# [*nexenta_target_prefix*]
# (optional) IQN prefix for iSCSI targets. Defaults to 'iqn:'.
#
# [*nexenta_target_group_prefix*]
# (optional) Prefix for iSCSI target groups on SA. Defaults to 'cinder/'.
#
# [*nexenta_blocksize*]
# (optional) Block size for volumes. Defaults to '8k'.
#
# [*nexenta_sparse*]
# (optional) Flag to create sparse volumes. Defaults to true.
#
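# === Examples
#
# Usage sketch; the title, host and credentials below are placeholder
# values, not defaults of this define:
#
#   cinder::backend::nexenta { 'myNexenta':
#     nexenta_user     => 'admin',
#     nexenta_password => 'secret',
#     nexenta_host     => '10.0.0.10',
#   }
#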
define cinder::backend::nexenta (
$nexenta_user,
$nexenta_password,
$nexenta_host,
$volume_backend_name = $name,
$nexenta_volume = 'cinder',
$nexenta_target_prefix = 'iqn:',
$nexenta_target_group_prefix = 'cinder/',
$nexenta_blocksize = '8k',
$nexenta_sparse = true
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/nexenta_user": value => $nexenta_user;
"${name}/nexenta_password": value => $nexenta_password;
"${name}/nexenta_host": value => $nexenta_host;
"${name}/nexenta_volume": value => $nexenta_volume;
"${name}/nexenta_target_prefix": value => $nexenta_target_prefix;
"${name}/nexenta_target_group_prefix": value => $nexenta_target_group_prefix;
"${name}/nexenta_blocksize": value => $nexenta_blocksize;
"${name}/nexenta_sparse": value => $nexenta_sparse;
"${name}/volume_driver": value => 'cinder.volume.drivers.nexenta.volume.NexentaDriver';
}
}

manifests/backend/san.pp Normal file
@@ -0,0 +1,80 @@
# == Define: cinder::backend::san
#
# Configures Cinder to use a SAN volume driver.
# Parameters are particular to each volume driver.
#
# === Parameters
#
# [*volume_driver*]
# (required) The volume driver that cinder-volume should use.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*san_thin_provision*]
# (optional) Use thin provisioning for SAN volumes? Defaults to true.
#
# [*san_ip*]
# (optional) IP address of SAN controller.
#
# [*san_login*]
# (optional) Username for SAN controller. Defaults to 'admin'.
#
# [*san_password*]
# (optional) Password for SAN controller.
#
# [*san_private_key*]
# (optional) Filename of private key to use for SSH authentication.
#
# [*san_clustername*]
# (optional) Cluster name to use for creating volumes.
#
# [*san_ssh_port*]
# (optional) SSH port to use with SAN. Defaults to 22.
#
# [*san_is_local*]
# (optional) Execute commands locally instead of over SSH;
# use if the volume service is running on the SAN device.
#
# [*ssh_conn_timeout*]
# (optional) SSH connection timeout in seconds. Defaults to 30.
#
# [*ssh_min_pool_conn*]
# (optional) Minimum ssh connections in the pool.
#
# [*ssh_max_pool_conn*]
# (optional) Maximum ssh connections in the pool.
#
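# === Examples
#
# Usage sketch; the driver path below is the one used in this change's
# spec file, and the SAN address and credentials are placeholder values:
#
#   cinder::backend::san { 'mySan':
#     volume_driver => 'cinder.volume.san.SolarisISCSIDriver',
#     san_ip        => '10.0.0.20',
#     san_login     => 'cluster_operator',
#     san_password  => 'secret',
#   }
#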
define cinder::backend::san (
$volume_driver,
$volume_backend_name = $name,
$san_thin_provision = true,
$san_ip = undef,
$san_login = 'admin',
$san_password = undef,
$san_private_key = undef,
$san_clustername = undef,
$san_ssh_port = 22,
$san_is_local = false,
$ssh_conn_timeout = 30,
$ssh_min_pool_conn = 1,
$ssh_max_pool_conn = 5
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => $volume_driver;
"${name}/san_thin_provision": value => $san_thin_provision;
"${name}/san_ip": value => $san_ip;
"${name}/san_login": value => $san_login;
"${name}/san_password": value => $san_password;
"${name}/san_private_key": value => $san_private_key;
"${name}/san_clustername": value => $san_clustername;
"${name}/san_ssh_port": value => $san_ssh_port;
"${name}/san_is_local": value => $san_is_local;
"${name}/ssh_conn_timeout": value => $ssh_conn_timeout;
"${name}/ssh_min_pool_conn": value => $ssh_min_pool_conn;
"${name}/ssh_max_pool_conn": value => $ssh_max_pool_conn;
}
}

@@ -0,0 +1,64 @@
# == Define: cinder::backend::solidfire
#
# Configures Cinder to use the SolidFire volume driver.
# Parameters are particular to each volume driver.
#
# === Parameters
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*volume_driver*]
# (optional) The volume driver that cinder-volume should use.
# Defaults to 'cinder.volume.drivers.solidfire.SolidFire'
#
# [*san_ip*]
# (required) IP address of the SolidFire cluster's MVIP.
#
# [*san_login*]
# (required) Username for SolidFire admin account.
#
# [*san_password*]
# (required) Password for SolidFire admin account.
#
# [*sf_emulate_512*]
# (optional) Use 512 byte emulation for volumes.
# Defaults to True
#
# [*sf_allow_tenant_qos*]
# (optional) Allow tenants to specify QoS via volume metadata.
# Defaults to False
#
# [*sf_account_prefix*]
# (optional) Prefix to use when creating tenant accounts on SolidFire Cluster.
# Defaults to '', in which case the account name is simply the tenant UUID.
#
# [*sf_api_port*]
# (optional) Port ID to use to connect to SolidFire API.
# Defaults to 443
#
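# === Examples
#
# Usage sketch; the cluster MVIP and credentials below are placeholder
# values:
#
#   cinder::backend::solidfire { 'mySolidfire':
#     san_ip       => '10.0.0.30',
#     san_login    => 'admin',
#     san_password => 'secret',
#   }
#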
define cinder::backend::solidfire(
$san_ip,
$san_login,
$san_password,
$volume_backend_name = $name,
$volume_driver = 'cinder.volume.drivers.solidfire.SolidFire',
$sf_emulate_512 = true,
$sf_allow_tenant_qos = false,
$sf_account_prefix = '',
$sf_api_port = '443'
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => $volume_driver;
"${name}/san_ip": value => $san_ip;
"${name}/san_login": value => $san_login;
"${name}/san_password": value => $san_password;
"${name}/sf_emulate_512": value => $sf_emulate_512;
"${name}/sf_allow_tenant_qos": value => $sf_allow_tenant_qos;
"${name}/sf_account_prefix": value => $sf_account_prefix;
"${name}/sf_api_port": value => $sf_api_port;
}
}

@@ -38,23 +38,11 @@ class cinder::volume::glusterfs (
$glusterfs_shares_config = '/etc/cinder/shares.conf'
) {
if $glusterfs_disk_util {
fail('glusterfs_disk_util is removed in Icehouse.')
}
$content = join($glusterfs_shares, "\n")
file {$glusterfs_shares_config:
content => "${content}\n",
require => Package['cinder'],
notify => Service['cinder-volume']
}
cinder_config {
'DEFAULT/volume_driver': value =>
'cinder.volume.drivers.glusterfs.GlusterfsDriver';
'DEFAULT/glusterfs_shares_config': value => $glusterfs_shares_config;
'DEFAULT/glusterfs_sparsed_volumes': value => $glusterfs_sparsed_volumes;
'DEFAULT/glusterfs_mount_point_base': value => $glusterfs_mount_point_base;
cinder::backend::glusterfs { 'DEFAULT':
glusterfs_shares => $glusterfs_shares,
glusterfs_disk_util => $glusterfs_disk_util,
glusterfs_sparsed_volumes => $glusterfs_sparsed_volumes,
glusterfs_mount_point_base => $glusterfs_mount_point_base,
glusterfs_shares_config => $glusterfs_shares_config,
}
}

@@ -39,15 +39,14 @@ class cinder::volume::nexenta (
$nexenta_sparse = true
) {
cinder_config {
'DEFAULT/nexenta_user': value => $nexenta_user;
'DEFAULT/nexenta_password': value => $nexenta_password;
'DEFAULT/nexenta_host': value => $nexenta_host;
'DEFAULT/nexenta_volume': value => $nexenta_volume;
'DEFAULT/nexenta_target_prefix': value => $nexenta_target_prefix;
'DEFAULT/nexenta_target_group_prefix': value => $nexenta_target_group_prefix;
'DEFAULT/nexenta_blocksize': value => $nexenta_blocksize;
'DEFAULT/nexenta_sparse': value => $nexenta_sparse;
'DEFAULT/volume_driver': value => 'cinder.volume.drivers.nexenta.volume.NexentaDriver';
cinder::backend::nexenta { 'DEFAULT':
nexenta_user => $nexenta_user,
nexenta_password => $nexenta_password,
nexenta_host => $nexenta_host,
nexenta_volume => $nexenta_volume,
nexenta_target_prefix => $nexenta_target_prefix,
nexenta_target_group_prefix => $nexenta_target_group_prefix,
nexenta_blocksize => $nexenta_blocksize,
nexenta_sparse => $nexenta_sparse,
}
}

@@ -57,18 +57,18 @@ class cinder::volume::san (
$ssh_max_pool_conn = 5
) {
cinder_config {
'DEFAULT/volume_driver': value => $volume_driver;
'DEFAULT/san_thin_provision': value => $san_thin_provision;
'DEFAULT/san_ip': value => $san_ip;
'DEFAULT/san_login': value => $san_login;
'DEFAULT/san_password': value => $san_password;
'DEFAULT/san_private_key': value => $san_private_key;
'DEFAULT/san_clustername': value => $san_clustername;
'DEFAULT/san_ssh_port': value => $san_ssh_port;
'DEFAULT/san_is_local': value => $san_is_local;
'DEFAULT/ssh_conn_timeout': value => $ssh_conn_timeout;
'DEFAULT/ssh_min_pool_conn': value => $ssh_min_pool_conn;
'DEFAULT/ssh_max_pool_conn': value => $ssh_max_pool_conn;
cinder::backend::san { 'DEFAULT':
volume_driver => $volume_driver,
san_thin_provision => $san_thin_provision,
san_ip => $san_ip,
san_login => $san_login,
san_password => $san_password,
san_private_key => $san_private_key,
san_clustername => $san_clustername,
san_ssh_port => $san_ssh_port,
san_is_local => $san_is_local,
ssh_conn_timeout => $ssh_conn_timeout,
ssh_min_pool_conn => $ssh_min_pool_conn,
ssh_max_pool_conn => $ssh_max_pool_conn,
}
}

@@ -43,17 +43,16 @@ class cinder::volume::solidfire(
$sf_allow_tenant_qos = false,
$sf_account_prefix = '',
$sf_api_port = '443'
) {
cinder_config {
'DEFAULT/volume_driver': value => $volume_driver;
'DEFAULT/san_ip': value => $san_ip;
'DEFAULT/san_login': value => $san_login;
'DEFAULT/san_password': value => $san_password;
'DEFAULT/sf_emulate_512': value => $sf_emulate_512;
'DEFAULT/sf_allow_tenant_qos':value => $sf_allow_tenant_qos;
'DEFAULT/sf_account_prefix': value => $sf_account_prefix;
'DEFAULT/sf_api_port': value => $sf_api_port;
cinder::backend::solidfire { 'DEFAULT':
san_ip => $san_ip,
san_login => $san_login,
san_password => $san_password,
volume_driver => $volume_driver,
sf_emulate_512 => $sf_emulate_512,
sf_allow_tenant_qos => $sf_allow_tenant_qos,
sf_account_prefix => $sf_account_prefix,
sf_api_port => $sf_api_port,
}
}

@@ -0,0 +1,61 @@
require 'spec_helper'
describe 'cinder::backend::glusterfs' do
shared_examples_for 'glusterfs volume driver' do
let(:title) {'mygluster'}
let :params do
{
:glusterfs_shares => ['10.10.10.10:/volumes', '10.10.10.11:/volumes'],
:glusterfs_shares_config => '/etc/cinder/other_shares.conf',
:glusterfs_sparsed_volumes => true,
:glusterfs_mount_point_base => '/cinder_mount_point',
}
end
it 'configures glusterfs volume driver' do
should contain_cinder_config('mygluster/volume_driver').with_value(
'cinder.volume.drivers.glusterfs.GlusterfsDriver')
should contain_cinder_config('mygluster/glusterfs_shares_config').with_value(
'/etc/cinder/other_shares.conf')
should contain_cinder_config('mygluster/glusterfs_sparsed_volumes').with_value(
true)
should contain_cinder_config('mygluster/glusterfs_mount_point_base').with_value(
'/cinder_mount_point')
should contain_file('/etc/cinder/other_shares.conf').with(
:content => "10.10.10.10:/volumes\n10.10.10.11:/volumes\n",
:require => 'Package[cinder]',
:notify => 'Service[cinder-volume]'
)
end
context "with an parameter which has been removed" do
before do
params.merge!({
:glusterfs_disk_util => 'foo',
})
end
it 'should fail' do
expect { subject }.to raise_error(Puppet::Error, /glusterfs_disk_util is removed in Icehouse./)
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'glusterfs volume driver'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'glusterfs volume driver'
end
end

@@ -0,0 +1,39 @@
# author 'Aimon Bustardo <abustardo at morphlabs dot com>'
# license 'Apache License 2.0'
# description 'configures openstack cinder nexenta driver'
require 'spec_helper'
describe 'cinder::backend::nexenta' do
let (:title) { 'nexenta' }
let :params do
{ :nexenta_user => 'nexenta',
:nexenta_password => 'password',
:nexenta_host => '127.0.0.2' }
end
let :default_params do
{ :nexenta_volume => 'cinder',
:nexenta_target_prefix => 'iqn:',
:nexenta_target_group_prefix => 'cinder/',
:nexenta_blocksize => '8k',
:nexenta_sparse => true }
end
let :facts do
{ :osfamily => 'Debian' }
end
context 'with required params' do
let :params_hash do
default_params.merge(params)
end
it 'configures nexenta volume driver' do
params_hash.each_pair do |config, value|
should contain_cinder_config("nexenta/#{config}").with_value(value)
end
end
end
end

@@ -0,0 +1,40 @@
require 'spec_helper'
describe 'cinder::backend::san' do
let (:title) { 'mysan' }
let :params do
{ :volume_driver => 'cinder.volume.san.SolarisISCSIDriver',
:san_ip => '127.0.0.1',
:san_login => 'cluster_operator',
:san_password => '007',
:san_clustername => 'storage_cluster' }
end
let :default_params do
{ :san_thin_provision => true,
:san_login => 'admin',
:san_ssh_port => 22,
:san_is_local => false,
:ssh_conn_timeout => 30,
:ssh_min_pool_conn => 1,
:ssh_max_pool_conn => 5 }
end
shared_examples_for 'a san volume driver' do
let :params_hash do
default_params.merge(params)
end
it 'configures cinder volume driver' do
params_hash.each_pair do |config,value|
should contain_cinder_config("mysan/#{config}").with_value( value )
end
end
end
context 'with parameters' do
it_configures 'a san volume driver'
end
end

@@ -0,0 +1,30 @@
require 'spec_helper'
describe 'cinder::backend::solidfire' do
let (:title) { 'solidfire' }
let :req_params do
{
:san_ip => '127.0.0.2',
:san_login => 'solidfire',
:san_password => 'password',
}
end
let :params do
req_params
end
describe 'solidfire volume driver' do
it 'configures the solidfire volume driver' do
should contain_cinder_config('solidfire/volume_driver').with_value(
'cinder.volume.drivers.solidfire.SolidFire')
should contain_cinder_config('solidfire/san_ip').with_value(
'127.0.0.2')
should contain_cinder_config('solidfire/san_login').with_value(
'solidfire')
should contain_cinder_config('solidfire/san_password').with_value(
'password')
end
end
end