OpenStack Queens on CentOS 7 desktop
- Use the pattern \\\r\n to find and strip the trailing backslash and line break in the official tutorial's commands (flattening the multi-line commands into single lines).
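For example, a one-liner that performs this flattening on a pasted file (a sketch; commands.txt is a hypothetical file holding the copied command, and the \r only matters if the file has Windows line endings):
perl -0777 -pe 's/\\\r?\n/ /g' commands.txt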
- Network configuration
- Basic NIC configuration format
TYPE=Ethernet
BOOTPROTO=static
ONBOOT=yes
NAME=eth0
DEVICE=eth0
IPADDR=192.168.0.51
NETMASK=255.255.255.0
GATEWAY=192.168.0.1
DNS1=192.168.0.1
- /etc/hosts configuration (all nodes)
192.168.0.51 controller
192.168.0.52 compute1
reboot
- Configure chrony (all nodes)
For simplicity, all nodes act as chrony clients.
vi /etc/chrony.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp1.aliyun.com iburst
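Restarting chronyd applies the new time source (the standard CentOS 7 service name; chrony is preinstalled on most CentOS 7 images):
systemctl enable chronyd.service && systemctl restart chronyd.service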
- Install the OpenStack packages (all nodes)
yum install centos-release-openstack-queens -y
yum install python-openstackclient -y
- If SELinux is not disabled (all nodes)
(See: disabling firewalld, NetworkManager, and SELinux on CentOS 7 minimal.)
Install the openstack-selinux package to automatically manage security policies for the OpenStack services.
yum install openstack-selinux -y
- Configure the SQL database (controller node only)
- Install
yum install mariadb mariadb-server python2-PyMySQL -y
- Back up the existing configuration
cd /etc/my.cnf.d/
tar czvf my.cnf.d.tar.gz *
- Create the configuration file openstack.cnf
vi openstack.cnf
[mysqld]
bind-address = 192.168.0.51
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
- Start
systemctl enable mariadb.service && systemctl start mariadb.service
- Secure the database installation and set a password (optional)
mysql_secure_installation
- Change the root password
Log in: mysql -u root mysql
Change the password: UPDATE user SET PASSWORD=PASSWORD('123456') WHERE USER='root';
FLUSH PRIVILEGES;
Exit: quit
Restart the service: systemctl restart mariadb.service
Note: the new password does not take effect until the service is restarted.
- Message queue RabbitMQ (controller node)
- Install
yum install rabbitmq-server -y
- Start
systemctl enable rabbitmq-server.service && systemctl start rabbitmq-server.service
- Add the openstack user
rabbitmqctl add_user openstack 123456
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
- Memcached (controller node only)
Note: used to cache Identity service tokens.
- Install
yum install memcached python-memcached -y
- Configure
vi /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,controller"
- Start
systemctl enable memcached.service && systemctl start memcached.service
- etcd (controller node)
- Install
yum install etcd -y
- Configure
vi /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.0.51:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.0.51:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.0.51:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.0.51:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.0.51:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
- Start
systemctl enable etcd && systemctl start etcd
- Keystone (controller node)
- Create the database
mysql -uroot -p123456
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
- Install and configure
yum install openstack-keystone httpd mod_wsgi -y
vi /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:123456@controller/keystone
[token]
provider = fernet
- Populate the database
su -s /bin/sh -c "keystone-manage db_sync" keystone
- Initialize the Fernet key repositories
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
- Bootstrap the Identity service
keystone-manage bootstrap --bootstrap-password 123456 --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
Note: the keystone-manage bootstrap command is what actually creates the default domain.
- Configure the Apache HTTP server
vi /etc/httpd/conf/httpd.conf
ServerName controller
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
- Start httpd
systemctl enable httpd.service && systemctl start httpd.service
- Log in as admin (set the admin environment variables)
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
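Note: later steps source an admin-openrc file that these notes never create explicitly; a minimal way to create it from the exports above (the file name follows the official guide, the location in the current directory is an assumption):
cat > admin-openrc << 'EOF'
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
. admin-openrc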
- Create the example domain (optional; just an example of how to create a domain)
openstack domain create --description "An Example Domain" example
- Create the service project
openstack project create --domain default --description "Service Project" service
Note: the service project is for the services themselves; each service gets its own unique user added to the service project.
- Create an unprivileged user and project (this is the general procedure for creating a user, role, and project; optional, just another example)
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
Note: the user role must exist, otherwise creating a project from the dashboard will fail.
- Log in as the demo user
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
- Request an authentication token as the admin user
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
Note: the parameters are the same as in the "Log in as admin" step; if the corresponding environment variables are set, the matching parameters can be omitted. For example, after loading the admin environment, getting a token only requires:
openstack token issue
- Request an authentication token as the demo user
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name demo --os-username demo token issue
- Glance service (controller node)
- Create the database
mysql -uroot -p123456
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
- Create the glance user (with its domain, project, and role)
. admin-openrc
openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin
Note: as mentioned above, the service project is for services; here the glance user is added to the service project and given the admin role.
- Create the glance service
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
Note: in openstack service create, glance is the service name and image is the service type; the name is arbitrary, but the type (image) is what clients use to look the service up in the catalog.
- Install the glance packages
yum install openstack-glance -y
- Configure
vi /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:123456@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 123456
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
- Configure /etc/glance/glance-registry.conf
Note: the Glance registry service and its API were deprecated in the Queens release, so systemctl start openstack-glance-registry.service is no longer needed.
- Populate the database
su -s /bin/sh -c "glance-manage db_sync" glance
- Start
systemctl enable openstack-glance-api.service && systemctl start openstack-glance-api.service
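To verify the Image service, the official guide's verification step can be used (the CirrOS download URL assumes the upstream mirror is reachable):
. admin-openrc
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list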
- Compute (controller node)
- Create the databases
mysql -uroot -p123456
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '123456';
- Create the nova user
. admin-openrc
openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin
- Create the nova service
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
- Create the placement user
. admin-openrc
openstack user create --domain default --password-prompt placement
openstack role add --project service --user placement admin
- Create the placement service
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
- Install
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y
- Configure
vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
my_ip = 192.168.0.51
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
transport_url = rabbit://openstack:123456@controller
[api_database]
connection = mysql+pymysql://nova:123456@controller/nova_api
[database]
connection = mysql+pymysql://nova:123456@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = 123456
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
Note on nova.virt.firewall.NoopFirewallDriver: by default, Compute uses an internal firewall driver. Since the Networking service includes its own firewall driver, the Compute firewall driver must be disabled by setting it to nova.virt.firewall.NoopFirewallDriver.
- Configure httpd to enable access to the Placement API
vi /etc/httpd/conf.d/00-nova-placement-api.conf
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
- Restart httpd
systemctl restart httpd
- Populate the databases
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
- Verify that cell0 and cell1 are registered correctly
nova-manage cell_v2 list_cells
- Start the services
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service && systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
- Compute (compute node)
- Install
yum install openstack-nova-compute -y
- Configure
vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:123456@controller
my_ip = 192.168.0.52
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = 123456
[vnc]
enabled = True
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
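If this compute node is itself a virtual machine without hardware virtualization acceleration, the official guide suggests checking for it and, when the count is 0, switching libvirt to plain QEMU (an optional step not in the original notes):
egrep -c '(vmx|svm)' /proc/cpuinfo
# if the result is 0, also set the following in /etc/nova/nova.conf:
# [libvirt]
# virt_type = qemu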
- Start
systemctl enable libvirtd.service openstack-nova-compute.service && systemctl restart libvirtd.service openstack-nova-compute.service
- Add this compute node to the cell database (run on the controller node)
. admin-openrc
Reboot the compute node first; otherwise the command below reports the Host as localhost.localdomain, which in turn breaks the command after it.
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
- Verify (on the controller node)
openstack compute service list
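The official guide's Compute verification also includes a cells/placement status check, which can be run here as well:
nova-status upgrade check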
- Neutron (controller node)
- Create the database
mysql -uroot -p123456
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
- Create the neutron user
. admin-openrc
openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
- Create the neutron service
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
- Install (self-service networks)
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
- Configure (self-service networks)
vi /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:123456@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
- Configure (self-service networks)
vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
- Configure (self-service networks)
vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = true
local_ip = 192.168.0.51
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
- Configure (self-service networks)
Make sure the Linux kernel supports bridge filters by verifying that all of the following sysctl values are set to 1:
vi /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
Takes effect after a reboot:
reboot
Or, to take effect immediately:
modprobe br_netfilter
sysctl -p
References: how to make the br_netfilter module load automatically at boot; loading kernel modules at boot on CentOS 7.
Note: OpenStack already configures br_netfilter to load at boot, so there is no need to configure this again.
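For reference, the standard systemd mechanism for loading the module at boot, should it ever be needed manually (not required here per the note above):
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf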
- Configure (self-service networks)
vi /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
- Configure (self-service networks)
vi /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
- Configure
vi /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = 123456
- Configure
vi /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = true
metadata_proxy_shared_secret = 123456
- Populate the database
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
- Start
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service && systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
- Start (self-service networks)
systemctl enable neutron-l3-agent.service && systemctl restart neutron-l3-agent.service
- Neutron (compute node)
- Install
yum install openstack-neutron-linuxbridge ebtables ipset -y
- Configure
vi /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
- Configure (self-service networks)
vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = true
local_ip = 192.168.0.52
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
- Configure (self-service networks)
Make sure the Linux kernel supports bridge filters by verifying that all of the following sysctl values are set to 1:
vi /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
Takes effect after a reboot:
reboot
Or, to take effect immediately:
modprobe br_netfilter
sysctl -p
- Configure
vi /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
- Restart
systemctl restart openstack-nova-compute.service
- Start
systemctl enable neutron-linuxbridge-agent.service && systemctl start neutron-linuxbridge-agent.service
- Verify (on the controller node)
openstack network agent list
* Cinder (controller node)
- Create the database
mysql -uroot -p123456
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'controller' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '123456';
- Create the cinder user
. admin-openrc
openstack user create --domain default --password-prompt cinder
openstack role add --project service --user cinder admin
- Create the cinderv2 and cinderv3 services
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
Note: cinder requires two services (v2 and v3).
- Install
yum install openstack-cinder -y
- Configure
vi /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
my_ip = 192.168.0.51
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 123456
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
- Populate the database
su -s /bin/sh -c "cinder-manage db sync" cinder
- Configure
vi /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
- Restart
systemctl restart openstack-nova-api.service
- Start
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service && systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
* cinder-lvm (on the block storage node)
- Install
yum install lvm2 device-mapper-persistent-data -y
- Start
systemctl enable lvm2-lvmetad.service && systemctl start lvm2-lvmetad.service
- Create the LVM physical volume
pvcreate /dev/vda
Note: /dev/vda here is a newly attached disk.
- Create the LVM volume group cinder-volumes
vgcreate cinder-volumes /dev/vda
Note: cinder-volumes is a volume group, i.e. multiple partitions or disks combined into one logical disk, and this is what gets exposed; in effect the block storage node provides a disk and OpenStack carves volumes out of it itself.
- Configure LVM to scan only /dev/vda
vi /etc/lvm/lvm.conf
devices {
        filter = [ "a/vda/", "r/.*/"]
}
(Reference: why /etc/lvm/lvm.conf needs to be configured this way.)
Note: in the filter above, a means accept and r means reject.
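If the storage node's operating system disk also uses LVM, the official guide notes that disk must be accepted by the filter too (here assuming the OS disk is /dev/sda):
filter = [ "a/sda/", "a/vda/", "r/.*/"]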
* Cinder (on the block storage node)
- Install
yum install openstack-cinder targetcli python-keystone -y
- Configure
vi /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
my_ip = 192.168.0.53
enabled_backends = lvm
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 123456
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
Note: the lvm in enabled_backends = lvm refers to the [lvm] section; the backend name itself is arbitrary, e.g. enabled_backends = lvm1 would pair with a [lvm1] section (see the sketch below).
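A minimal sketch of that renamed-backend case (lvm1 is an arbitrary name, used purely for illustration):
[DEFAULT]
enabled_backends = lvm1
[lvm1]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm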
- Start
systemctl enable openstack-cinder-volume.service target.service && systemctl start openstack-cinder-volume.service target.service
- Verify (on the controller node)
openstack volume service list
- Dashboard (controller node)
- Install
yum install openstack-dashboard -y
- Configure
vi /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    }
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_NEUTRON_NETWORK = {
    ...
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}
TIME_ZONE = "UTC"
Note: setting 'enable_router': True enables routers in the dashboard, but this requires a self-service network.
- Configure
vi /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
- Restart the services
systemctl restart httpd.service memcached.service
- Access
http://controller/dashboard