OpenStack High-Availability Cluster 2: OpenStack Deployment with Nested Virtualization and Open vSwitch (OpenStack Pike on CentOS 7.6)

A detailed walkthrough of OpenStack Pike on CentOS 7.6.
Because lab resources are limited, this experiment enables nested virtualization on a single physical host (CentOS 7.4) and uses Open vSwitch as the underlying network layer.
 
 
 
 
Chapter 1: Creating the Lab VMs
 
The physical lab host:
[root@bj_dfgc_gitserver_2 vm]# cat /sys/module/kvm_intel/parameters/nested
N
Nested virtualization is not yet enabled in KVM, so enable it:
cat >/etc/modprobe.d/kvm-nested.conf<<EOF
options kvm-intel nested=1
options kvm-intel enable_shadow_vmcs=1
options kvm-intel enable_apicv=1
options kvm-intel ept=1
EOF
 
[root@bj_dfgc_gitserver_2 vm]# modprobe -r kvm_intel
[root@bj_dfgc_gitserver_2 vm]# lsmod | grep kvm
[root@bj_dfgc_gitserver_2 vm]# modprobe -a kvm_intel
[root@bj_dfgc_gitserver_2 vm]# lsmod | grep kvm
kvm_intel             170086  0
kvm                   566340  1 kvm_intel
irqbypass              13503  1 kvm
[root@bj_dfgc_gitserver_2 vm]# cat /sys/module/kvm_intel/parameters/nested
Y
 
OK, nested virtualization is now enabled in KVM.
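To confirm the capability actually propagates into a guest, check the CPU flags inside a running VM once one is up (this works here because the VM XML below uses host-passthrough):

# run inside the guest, not on the physical host
egrep -c '(vmx|svm)' /proc/cpuinfo    # non-zero means the guest can itself run KVM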
 
Install Open vSwitch
The openvswitch package in the stock yum repositories seems to be broken; the service refuses to start after installation. http://rpmfind.net/ can search all kinds of RPM packages, so search for an openvswitch RPM there and download it for installation; otherwise you would have to build from source.
 
yum install openvswitch-2.0.0-7.el7.x86_64.rpm -y
Start the openvswitch service and enable it at boot:
systemctl start openvswitch
systemctl enable openvswitch
 
Install and start the libvirtd service
yum install qemu-kvm qemu-kvm-tools virt-manager libvirt virt-install -y
systemctl enable libvirtd
systemctl start libvirtd
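Optionally verify the host is fully ready for KVM (virt-host-validate ships with the libvirt packages just installed):

virt-host-validate qemu    # every check should report PASS; IOMMU warnings are usually harmless in a lab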
 
Create the VMs
 
Edit the VM XML definitions
[root@bj_dfgc_gitserver_2 vm]# cat node1.xml
<domain type='kvm'>
  <name>node1</name>
  <memory unit='GiB'>24</memory>
  <currentMemory unit='GiB'>8</currentMemory>
  <vcpu placement='static' current='8'>24</vcpu>
  <os>
    <type arch='x86_64' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode="host-passthrough"/>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node1.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node1-data.qcow2'/>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <interface type='bridge'>
      <model type='virtio'/>
      <source bridge='br_p4p1'/>
      <driver name='vhost'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw1'/>
      <vlan trunk='yes'>
        <tag id='99'/>
        <tag id='100'/>
        <tag id='101'/>
        <tag id='102'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw1-1'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw2'/>
      <vlan trunk='yes'>
        <tag id='809'/>
        <tag id='810'/>
        <tag id='811'/>
        <tag id='812'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw2-1'/>
    </interface>
    <serial type='pty'/>
    <input type='tablet' bus='usb'/>
    <graphics type='vnc' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
    <video>
      <model type='cirrus'/>
    </video>
    <memballoon model='virtio'>
      <stats period='10'/>
    </memballoon>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/node1.agent'/>
      <target type='virtio' name='org.qemu.guest_agent.0'/>
    </channel>
  </devices>
</domain>
 
-------------------------------------------------------------
# cat node2.xml
<domain type='kvm'>
  <name>node2</name>
  <memory unit='GiB'>24</memory>
  <currentMemory unit='GiB'>16</currentMemory>
  <vcpu placement='static' current='8'>24</vcpu>
  <os>
    <type arch='x86_64' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode="host-passthrough"/>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node2.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node2-data.qcow2'/>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <interface type='bridge'>
      <model type='virtio'/>
      <source bridge='br_p4p1'/>
      <driver name='vhost'/>
    </interface>
    <interface type='bridge'>
      <model type='virtio'/>
      <source bridge='br_p4p1'/>
      <driver name='vhost'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw1'/>
      <vlan trunk='yes'>
        <tag id='99'/>
        <tag id='100'/>
        <tag id='101'/>
        <tag id='102'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw1-2'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw2'/>
      <vlan trunk='yes'>
        <tag id='809'/>
        <tag id='810'/>
        <tag id='811'/>
        <tag id='812'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw2-2'/>
    </interface>
    <serial type='pty'/>
    <input type='tablet' bus='usb'/>
    <graphics type='vnc' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
    <video>
      <model type='cirrus'/>
    </video>
    <memballoon model='virtio'>
      <stats period='10'/>
    </memballoon>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/node2.agent'/>
      <target type='virtio' name='org.qemu.guest_agent.0'/>
    </channel>
  </devices>
</domain>
 
 
Reuse the CentOS 7.6 image that was prepared earlier:
[root@bj_dfgc_gitserver_2 vm]# cp centos7.1810.qcow2 node1.qcow2
[root@bj_dfgc_gitserver_2 vm]# cp centos7.1810.qcow2 node2.qcow2
 
Create the two OVS bridges used by the lab, opensw1 and opensw2:
ovs-vsctl add-br opensw1
ovs-vsctl add-br opensw2
 
Create data disks for node1 and node2 (the node2 commands are shown; node1 follows the same pattern, as sketched below):
[root@bj_dfgc_gitserver_2 vm]# qemu-img create -f qcow2 node2-data.qcow2 500G
Formatting 'node2-data.qcow2', fmt=qcow2 size=536870912000 encryption=off cluster_size=65536 lazy_refcounts=off
[root@bj_dfgc_gitserver_2 vm]# qemu-img info node2-data.qcow2
image: node2-data.qcow2
file format: qcow2
virtual size: 500G (536870912000 bytes)
disk size: 200K
cluster_size: 65536
Format specific information:
    compat: 1.1
    lazy refcounts: false
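node1 needs its data disk too; the command mirrors the node2 one above:

qemu-img create -f qcow2 node1-data.qcow2 500G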
 
Define and start the two lab VMs
 
[root@bj_dfgc_gitserver_2 vm]# virsh define node1.xml
[root@bj_dfgc_gitserver_2 vm]# virsh define node2.xml
[root@bj_dfgc_gitserver_2 vm]# virsh start node1
[root@bj_dfgc_gitserver_2 vm]# virsh start node2
 
Check the bridge state after startup:
# ovs-vsctl show
ed4b0290-d9c8-44a6-83cf-3093ce28c7cc
    Bridge "opensw1"
        Port "opensw1"
            Interface "opensw1"
                type: internal
        Port "opensw1-2"
            trunks: [99, 100, 101, 102]
            Interface "opensw1-2"
        Port "opensw1-1"
            trunks: [99, 100, 101, 102]
            Interface "opensw1-1"
    Bridge "opensw2"
        Port "opensw2-2"
            trunks: [809, 810, 811, 812]
            Interface "opensw2-2"
        Port "opensw2"
            Interface "opensw2"
                type: internal
        Port "opensw2-1"
            trunks: [809, 810, 811, 812]
            Interface "opensw2-1"
    ovs_version: "2.0.0"
 
 
[root@bj_dfgc_gitserver_2 vm]# brctl show
bridge name    bridge id        STP enabled    interfaces
br0        8000.000000000000    no        
br1        8000.000000000000    no        
br_p4p1        8000.2c534a0105e2    no        p4p1
                            vnet0
                            vnet1
                            vnet2
 
Physical NIC configuration
[root@bj_dfgc_gitserver_2 vm]# cat /etc/sysconfig/network-scripts/ifcfg-p4p1
TYPE=Ethernet
BOOTPROTO=static
NAME=p4p1
DEVICE=p4p1
ONBOOT=yes
BRIDGE=br_p4p1
[root@bj_dfgc_gitserver_2 vm]# cat /etc/sysconfig/network-scripts/ifcfg-br_p4p1
TYPE=Bridge
ONBOOT=yes
DEVICE=br_p4p1
BOOTPROTO=static
IPADDR=10.88.66.5
NETMASK=255.255.255.224
GATEWAY=10.88.66.1
DNS1=114.114.114.114
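After editing the ifcfg files, the network service has to be restarted for the bridge to come up (note this briefly drops connectivity on p4p1):

systemctl restart network
ip addr show br_p4p1    # the host address 10.88.66.5 should now sit on the bridge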
 
--------------------------------------------------------------------------------------------------------------------------
Begin the OpenStack installation inside the VMs
Official installation guide (Chinese edition): https://docs.openstack.org/zh_CN/install-guide/
 
Chapter 2: OpenStack Environment Preparation
[root@openstack1 ~]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
 
                      eth0                  eth1                    eth2
openstack1   10.88.66.15       Trunk                  Trunk                 controller node
openstack2   10.88.66.16       Trunk                  Trunk                 compute node
 
Set the hostnames and the hosts file
echo -e "10.88.66.15 openstack1 \n10.88.66.16 openstack2 " >> /etc/hosts
 
Controller node:
hostnamectl set-hostname openstack1
Compute node:
hostnamectl set-hostname openstack2
 
Base package installation

The base packages need to be installed on every OpenStack node, both controller and compute.
 
1. Install the EPEL repository (either of the following works):
yum install epel-release -y
yum install -y http://mirrors.aliyun.com/epel/epel-release-latest-7.noarch.rpm
 
2. Install the OpenStack repository

Since Pike, the release packages are available directly from the CentOS base 'extras' repository, so plain yum works:
[root@openstack1 ~]# yum search openstack
Loaded plugins: fastestmirror
Determining fastest mirrors
* extras: mirror.jdcloud.com
* updates: mirror.jdcloud.com
=================================== N/S matched: openstack ====================================
ansible-openstack-modules.noarch : Unofficial Ansible modules for managing Openstack
centos-release-openstack-ocata.noarch : OpenStack from the CentOS Cloud SIG repo configs
centos-release-openstack-pike.x86_64 : OpenStack from the CentOS Cloud SIG repo configs
centos-release-openstack-queens.noarch : OpenStack from the CentOS Cloud SIG repo configs
centos-release-openstack-rocky.noarch : OpenStack from the CentOS Cloud SIG repo configs
diskimage-builder.noarch : Image building tools for OpenStack
golang-github-rackspace-gophercloud-devel.noarch : The Go SDK for Openstack
                                                 : http://gophercloud.io
php-opencloud.noarch : PHP SDK for OpenStack/Rackspace APIs
php-opencloud-doc.noarch : Documentation for PHP SDK for OpenStack/Rackspace APIs
python2-oslo-sphinx.noarch : OpenStack Sphinx Extensions and Theme for Python 2
 
  Name and summary matches only, use "search all" for everything.
 
[root@openstack1 ~]# yum install centos-release-openstack-pike -y
 
3. Install the OpenStack client
yum install -y python-openstackclient
4. Install the OpenStack SELinux management package
yum install -y openstack-selinux
 
5. Time synchronization
yum install -y ntp
systemctl enable ntpd
ntpdate  time.pool.aliyun.com && hwclock -w
timedatectl set-timezone Asia/Shanghai
echo '0 3 * * * root /usr/sbin/ntpdate   time.pool.aliyun.com  && hwclock -w >/dev/null 2>&1' >>/etc/crontab  
echo '0 2 * * * root timedatectl set-timezone Asia/Shanghai && ntpdate time1.aliyun.com && hwclock -w >/dev/null 2>&1' >>/etc/crontab
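A quick way to confirm the clock setup took effect (assuming ntpd has started):

ntpq -p       # at least one peer line should be marked with '*'
timedatectl   # should report the Asia/Shanghai time zone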
 
---------------------------------------------------------------------------------------------------------------
MySQL database deployment

MySQL installation
[root@openstack1 ~]# yum install -y mariadb mariadb-server python2-PyMySQL
 
Configure the database
Official SQL database installation document for RHEL and CentOS: https://docs.openstack.org/install-guide/environment-sql-database-rdo.html

Create and edit /etc/my.cnf.d/openstack.cnf, then complete the following actions:
In the [mysqld] section, set ``bind-address`` to the management IP address of the controller node, so other nodes can reach the database over the management network:
[mysqld]
bind-address = 10.88.66.15
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
Finalize the installation
Start the database service and configure it to start at boot:
[root@openstack1 ~]# systemctl enable mariadb.service
[root@openstack1 ~]# systemctl start mariadb.service
To secure the database service, run the ``mysql_secure_installation`` script; in particular, set a suitable password for the database root user. (In a lab you can skip the password for convenience.)
[root@openstack1 ~]# mysql_secure_installation
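Before creating the databases, it is worth confirming MariaDB is listening on the management address set in openstack.cnf:

ss -lnt | grep 3306    # should show 10.88.66.15:3306 in LISTEN state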
 
# Glance database
mysql -u root -e "CREATE DATABASE glance;"
mysql -u root -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';"
mysql -u root -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"
# Nova databases
mysql -u root -e "CREATE DATABASE nova;"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
 
mysql -u root -e "CREATE DATABASE nova_api; "
mysql -u root -e " GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova'; "
mysql -u root -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
 
mysql -u root -e "CREATE DATABASE nova_cell0;"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost'  IDENTIFIED BY 'nova';"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%'  IDENTIFIED BY 'nova';"
 
mysql -u root -e "CREATE DATABASE placement;"
mysql -u root -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost'   IDENTIFIED BY 'placement';"
mysql -u root -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%'    IDENTIFIED BY 'placement';"
# Neutron database
mysql -u root -e "CREATE DATABASE neutron;"
mysql -u root -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';"
mysql -u root -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';"
# Cinder database
mysql -u root -e "CREATE DATABASE cinder;"
mysql -u root -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';"
mysql -u root -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"
 
# Keystone database
mysql -u root -e "CREATE DATABASE keystone;"
mysql -u root -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';"
mysql -u root -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';"
 
 
Note: since the Newton release, Nova has gained an extra nova_cell0 database.
 
Check that the databases and grants succeeded, using the keystone database as an example:
[root@openstack1 ~]# mysql -u root -e "SHOW GRANTS FOR keystone@'%';"
+---------------------------------------------------------------------------------------------------------+
| Grants for keystone@%                                                                                   |
+---------------------------------------------------------------------------------------------------------+
| GRANT USAGE ON *.* TO 'keystone'@'%' IDENTIFIED BY PASSWORD '*936E8F7AB2E21B47F6C9A7E5D9FE14DBA2255E5A' |
| GRANT ALL PRIVILEGES ON `keystone`.* TO 'keystone'@'%'                                                  |
+---------------------------------------------------------------------------------------------------------+
 
Note: if you want to log in to the MySQL database without entering a password, do the following:
[root@openstack1 ~]# vim .my.cnf
[client]
host=localhost
user='root'
password='YOURMYSQLPASSWORD'
 
Replace 'YOURMYSQLPASSWORD' with your own MySQL database password.
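Since .my.cnf stores the root password in clear text, tightening its permissions is a sensible precaution:

chmod 600 /root/.my.cnf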
 
Message broker RabbitMQ

Official message queue installation document for RHEL and CentOS: https://docs.openstack.org/install-guide/environment-messaging-rdo.html

1. Install RabbitMQ
Install the package:
[root@openstack1 ~]# yum install -y rabbitmq-server
Start the message queue service and configure it to start at boot:
 
[root@openstack1 ~]# systemctl enable rabbitmq-server.service
[root@openstack1 ~]# systemctl start rabbitmq-server.service
Add the openstack user:
 
[root@openstack1 ~]# rabbitmqctl add_user openstack openstack
 
Note: when running this, make sure the hostname matches what /etc/hosts shows, otherwise the operation fails with an error.
(The official doc uses the placeholder RABBIT_DBPASS; replace it with a suitable password, here simply 'openstack'.)
 
Grant the ``openstack`` user configure, write, and read permissions:
 
[root@openstack1 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
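The grant can be confirmed with rabbitmqctl; the openstack user should show ".*" in all three permission columns:

[root@openstack1 ~]# rabbitmqctl list_permissions -p /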
 
 
List the RabbitMQ plugins:
[root@openstack1 ~]# rabbitmq-plugins list
Configured: E = explicitly enabled; e = implicitly enabled
| Status:   * = running on rabbit@openstack1
|/
[  ] amqp_client                       3.6.5
[  ] cowboy                            1.0.3
[  ] cowlib                            1.0.1
[  ] mochiweb                          2.13.1
[  ] rabbitmq_amqp1_0                  3.6.5
[  ] rabbitmq_auth_backend_ldap        3.6.5
[  ] rabbitmq_auth_mechanism_ssl       3.6.5
[  ] rabbitmq_consistent_hash_exchange 3.6.5
[  ] rabbitmq_event_exchange           3.6.5
[  ] rabbitmq_federation               3.6.5
[  ] rabbitmq_federation_management    3.6.5
[  ] rabbitmq_jms_topic_exchange       3.6.5
[  ] rabbitmq_management               3.6.5
[  ] rabbitmq_management_agent         3.6.5
[  ] rabbitmq_management_visualiser    3.6.5
[  ] rabbitmq_mqtt                     3.6.5
[  ] rabbitmq_recent_history_exchange  1.2.1
[  ] rabbitmq_sharding                 0.1.0
[  ] rabbitmq_shovel                   3.6.5
[  ] rabbitmq_shovel_management        3.6.5
[  ] rabbitmq_stomp                    3.6.5
[  ] rabbitmq_top                      3.6.5
[  ] rabbitmq_tracing                  3.6.5
[  ] rabbitmq_trust_store              3.6.5
[  ] rabbitmq_web_dispatch             3.6.5
[  ] rabbitmq_web_stomp                3.6.5
[  ] rabbitmq_web_stomp_examples       3.6.5
[  ] sockjs                            0.3.4
[  ] webmachine                        1.10.3
 
---------------------------------------------------------------------------------------
 
Enable the RabbitMQ management plugin:
[root@openstack1 ~]# rabbitmq-plugins enable rabbitmq_management
---------------------------------------------------------------------------------------
Restart RabbitMQ:
[root@openstack1 ~]# systemctl restart rabbitmq-server.service
---------------------------------------------------------------------------------------
Check the listening ports again; the web management port is 15672:
[root@openstack1 ~]# netstat -lntup
tcp        0      0 0.0.0.0:15672           0.0.0.0:*               LISTEN      11412/beam
---------------------------------------------------------------------------------------
 
Open 10.88.66.15:15672 in a browser; username guest, password guest.
After logging in:
Admin -> copy 'administrator' -> click openstack -> Update this user ->
Tags: paste 'administrator' -> set the password to openstack -> logout
Then log back in with username openstack and password openstack.
 
Install Memcached
The Identity service authentication mechanism uses Memcached to cache tokens. The memcached service usually runs on the controller node. For production deployments, we recommend securing it with a combination of firewalling, authentication, and encryption.
Install and configure the components
1. Install the packages:
[root@openstack1 ~]# yum install -y memcached python-memcached
2. Edit /etc/sysconfig/memcached and complete the following:
    * Configure the service to use the management IP address of the controller node. This enables access from other nodes over the management network:
OPTIONS="-l 10.88.66.15,::1"
 
Finalize the installation
* Start the Memcached service and configure it to start at system boot:
# systemctl enable memcached.service
# systemctl start memcached.service
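Verify memcached is listening on the management address (the stats probe assumes nc is installed):

ss -lnt | grep 11211                            # should show 10.88.66.15:11211
printf 'stats\nquit\n' | nc 10.88.66.15 11211 | head -5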
 
Chapter 3: OpenStack Identity Service Keystone
---------------------------------------------------------------------------------------
What Keystone does: users and authentication (user permissions and user activity tracking);
              service catalog (a directory of all services and the endpoints of their APIs)
Key terms: User, Tenant (project), Token, Role, Service, Endpoint
----------------------------------------------------------------------------------------
1. Install keystone
[root@openstack1 ~]# yum install -y openstack-keystone httpd mod_wsgi
 
[root@openstack1 ~]# openssl rand -hex 10        ---- generate a random token
dc46816a3e103ec2a700
 
Edit /etc/keystone/keystone.conf and complete the following actions:

In the ``[DEFAULT]`` section, define the value of the initial administration token:

[DEFAULT]
...
admin_token = ADMIN_TOKEN
Replace ``ADMIN_TOKEN`` with the random value generated in the step above.
 
In the [database] section, configure database access:
 
[database]
...
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@10.88.66.15/keystone
Replace ``KEYSTONE_DBPASS`` with the password you chose for the database (this guide uses 'keystone').
 
In the ``[token]`` section, configure the Fernet token provider:
 
[token]
...
provider = fernet
(The Identity service database itself is populated in a later step.)

The finished /etc/keystone/keystone.conf:
[root@openstack1 ~]# grep -v '^$\|^#'  /etc/keystone/keystone.conf
[DEFAULT]
admin_token = dc46816a3e103ec2a700
[assignment]
[auth]
[cache]
[catalog]
[cors]
[cors.subdomain]
[credential]
[database]
connection = mysql+pymysql://keystone:keystone@10.88.66.15/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet
[tokenless_auth]
[trust]
 
 
 
 
-----------------------------------------------------------------------------------------------
 
Sync the database. Watch the file permissions: use su -s to run the command as the keystone user:
 
[root@openstack1 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@openstack1 ~]# chown -R keystone:keystone /var/log/keystone/keystone.log  # optional
[root@openstack1 keystone]# mysql  -ukeystone -pkeystone keystone -e "use keystone;show tables;"
+------------------------+
| Tables_in_keystone     |
+------------------------+
| access_token           |
| assignment             |
| config_register        |
| consumer               |
| credential             |
| endpoint               |
| endpoint_group         |
| federated_user         |
| federation_protocol    |
| group                  |
| id_mapping             |
| identity_provider      |
| idp_remote_ids         |
| implied_role           |
| local_user             |
| mapping                |
| migrate_version        |
| nonlocal_user          |
| password               |
| policy                 |
| policy_association     |
| project                |
| project_endpoint       |
| project_endpoint_group |
| region                 |
| request_token          |
| revocation_event       |
| role                   |
| sensitive_config       |
| service                |
| service_provider       |
| token                  |
| trust                  |
| trust_role             |
| user                   |
| user_group_membership  |
| user_option            |
| whitelisted_config     |
+------------------------+
All tables created, OK.
 
Initialize the Fernet keys:
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
 
Seeing the fernet-keys and credential-keys directories below means the initialization commands above completed:
[root@openstack1 ~]# ls -lh /etc/keystone/
total 136K
drwx------. 2 keystone keystone   24 Feb 28 14:16 credential-keys
-rw-r-----. 1 root     keystone 2.3K Nov  1 06:24 default_catalog.templates
drwx------. 2 keystone keystone   24 Feb 28 14:16 fernet-keys
-rw-r-----. 1 root     keystone 114K Feb 28 14:14 keystone.conf
-rw-r-----. 1 root     keystone 2.5K Nov  1 06:24 keystone-paste.ini
-rw-r-----. 1 root     keystone 1.1K Nov  1 06:24 logging.conf
-rw-r-----. 1 root     keystone    3 Nov  1 17:21 policy.json
-rw-r-----. 1 keystone keystone  665 Nov  1 06:24 sso_callback_template.html
 
 
 
 
 
----------------------------------------------------------------------------------
 
Configure the Apache HTTP server
 
Edit the ``/etc/httpd/conf/httpd.conf`` file and set the ``ServerName`` option to the controller node:
Listen 0.0.0.0:80
ServerName localhost:80
 
httpd's ServerName must be configured, otherwise the keystone service will not start.
 
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
 
Below is the content of /etc/httpd/conf.d/wsgi-keystone.conf. Apache proxies Keystone on two ports: 5000 for normal API access and 35357 for admin access.
[root@openstack1 ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
[root@openstack1 ~]# cat /etc/httpd/conf.d/wsgi-keystone.conf
Listen 0.0.0.0:5000
Listen 0.0.0.0:35357
 
<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    LimitRequestBody 114688
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone.log
    CustomLog /var/log/httpd/keystone_access.log combined
 
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
 
<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    LimitRequestBody 114688
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone.log
    CustomLog /var/log/httpd/keystone_access.log combined
 
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
 
Alias /identity /usr/bin/keystone-wsgi-public
<Location /identity>
    SetHandler wsgi-script
    Options +ExecCGI
 
    WSGIProcessGroup keystone-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
</Location>
 
Alias /identity_admin /usr/bin/keystone-wsgi-admin
<Location /identity_admin>
    SetHandler wsgi-script
    Options +ExecCGI
 
    WSGIProcessGroup keystone-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
</Location>
 
 
Ha, quite a bit more content than the Newton version.
 
 
---------------------------------------------------------------------------------------------------
Start the Apache HTTP service and configure it to start at boot:
[root@openstack1 ~]# systemctl enable httpd
[root@openstack1 ~]# systemctl start httpd
---------------------------------------------------------------------------------------------------
Check the ports:
[root@openstack1 ~]# netstat -lntup|grep httpd
tcp        0      0 0.0.0.0:35357           0.0.0.0:*               LISTEN      16721/http         
tcp        0      0 0.0.0.0:5000            0.0.0.0:*               LISTEN      16721/http         
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      16721/http
 
 
 
Check the log at /var/log/keystone/keystone.log.
No ERROR entries means keystone started normally.
[root@openstack1 ~]# tail /var/log/keystone/keystone.log
2018-03-05 09:54:52.488 91245 INFO migrate.versioning.api [-] done
2018-03-05 09:54:52.488 91245 INFO migrate.versioning.api [-] 3 -> 4...
2018-03-05 09:54:52.626 91245 INFO migrate.versioning.api [-] done
2018-03-05 09:55:23.417 91267 INFO keystone.common.fernet_utils [-] key_repository does not appear to exist; attempting to create it
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Created a new key: /etc/keystone/fernet-keys/0
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Starting key rotation with 1 key files: ['/etc/keystone/fernet-keys/0']
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Current primary key is: 0
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Next primary key will be: 1
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Promoted key 0 to be the primary: 1
2018-03-05 09:55:23.419 91267 INFO keystone.common.fernet_utils [-] Created a new key: /etc/keystone/fernet-keys/0
---------------------------------------------------------------------------------------------------
 
Set up the admin token, endpoint, and API version environment for verification:
[root@openstack1 ~]# grep -n '^admin_token' /etc/keystone/keystone.conf
18:admin_token = dc46816a3e103ec2a700
 
[root@openstack1 ~]# export OS_TOKEN=dc46816a3e103ec2a700    ------- set the environment variables
[root@openstack1 ~]# export OS_URL=http://10.88.66.15:35357/v3
[root@openstack1 ~]# export OS_IDENTITY_API_VERSION=3
 
Create domains, projects, users, and roles

The Identity service provides authentication for every OpenStack service, using a combination of domains, projects (tenants), users, and roles.
 
Create the ``default`` domain:
openstack domain create --description "Default Domain" default
 
For administrative operations, create an administrative project, user, and role in your environment:

Create the admin project:
 
openstack project create --domain default --description "Admin Project" admin
 
Note

OpenStack generates IDs dynamically, so your output will differ from the example output shown here.
 
Create the admin user:
 
openstack user create --domain default --password-prompt admin
 
Create the admin role:
 
openstack role create admin
 
Add the ``admin`` role to the admin project and user:
 
[root@openstack1 ~]# openstack role add --project admin --user admin admin
Note

This command produces no output.
Note
Any role you create must map into the ``policy.json`` file in each OpenStack service's configuration directory (e.g. /etc/keystone/). The default policy grants the 'admin' role administrative access to most services. For more information, see the Operations Guide - Managing Projects and Users <http://docs.openstack.org/ops-guide/opsrojects-users.html>.
 
Tip: it is best to delete any admin user that registration has already added, since you do not know its password...
 
 
Create the ``demo`` project:
 
openstack project create --domain default --description "Demo Project" demo
Note

Do not repeat this step when creating additional users for this project.
 
Create the ``demo`` user:
 
openstack user create --domain default --password-prompt demo
 
Create the user role:
 
openstack role create user
 
Add the ``user`` role to the ``demo`` project and user:
 
openstack role add --project demo --user demo user
 
 
This guide uses a service project that contains a unique user for each service you add to your environment. Create the ``service`` project:
openstack project create --domain default --description "Service Project" service
 
Quick copy-paste command list
export OS_TOKEN=dc46816a3e103ec2a700
export OS_URL=http://10.88.66.15:35357/v3
export OS_IDENTITY_API_VERSION=3
openstack domain create --description "Default Domain" default
openstack project create --domain default --description "Admin Project" admin
openstack user create --domain default --password-prompt admin
openstack role create admin
openstack role add --project admin --user admin admin
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
openstack project create --domain default --description "Service Project" service
--------------------------------------------------------------------------------------------------
 
View the created users and roles:
 
[root@openstack1 ~]# openstack user list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 6457b53386544638b44af8217d531f3f | demo  |
| a178e2b2c5234e2389c574b3474b5cc2 | admin |
+----------------------------------+-------+
[root@openstack1 ~]# openstack role list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| e2d5ea992b774b788504f3e6ec437fed | user  |
| eff56f6f078f41439f9c60a32a5cc411 | admin |
+----------------------------------+-------+
[root@openstack1 ~]# openstack project list
+----------------------------------+---------+
| ID                               | Name    |
+----------------------------------+---------+
| 63c27b21277a46dc90cf9bc50521f511 | demo    |
| bf40e05ce8c042a4ae0caa3dfd53f758 | service |
| e1f3d0d070534e14b2a538979b955bb7 | admin   |
+----------------------------------+---------+
 
-------------------------------------------------------------------------------------------------
 
Create the glance user:
 
openstack user create --domain default --password=glance glance
Add this user to the service project and grant it the admin role:
openstack role add --project service --user glance admin
 
Create the nova user:
openstack user create --domain default --password=nova nova
openstack role add --project service --user nova admin
 
Create the placement user (for the Nova Placement API):
openstack user create --domain default --password=placement placement
openstack role add --project service --user placement admin
 
Create the neutron user:
openstack user create --domain default --password=neutron neutron
openstack role add --project service --user neutron admin
 
 
 
Bootstrap the Identity service:
keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://10.88.66.15:35357/v3/ --bootstrap-internal-url http://10.88.66.15:5000/v3/ --bootstrap-public-url http://10.88.66.15:5000/v3/ --bootstrap-region-id RegionOne
 
Create the service entity and API endpoints
In your OpenStack environment, the Identity service manages a catalog of services. Services use this catalog to locate the other services in your environment.
 
Create the service entity for the Identity service:
 
[root@openstack1 ~]# openstack service create --name keystone --description "OpenStack Identity" identity
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | bf5c6f371d3541a083a8bc7a4f4a91a5 |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+
Note

OpenStack generates IDs dynamically, so your output will differ from the example shown.

The Identity service also manages a catalog of API endpoints associated with your environment. Services use this catalog to determine how to communicate with the other services in your environment.

OpenStack uses three API endpoint variants for each service: admin, internal, and public. By default, the admin API endpoint allows modifying users and tenants, while the public and internal APIs do not. In production, the variants may live on separate networks serving different types of users for security reasons. For instance, the public API network is visible from the Internet so customers can manage their own clouds; the admin API network is restricted to operators within the organization that manages the cloud infrastructure; the internal API network may be limited to the hosts that run OpenStack services. OpenStack also supports multiple regions for scalability. For simplicity, this guide uses the management network for all endpoint variants and the default ``RegionOne`` region.
 
Create the Identity service API endpoints:
 
openstack endpoint create --region RegionOne identity public http://10.88.66.15:5000/v3
openstack endpoint create --region RegionOne identity internal http://10.88.66.15:5000/v3
openstack endpoint create --region RegionOne identity admin http://10.88.66.15:35357/v3
 
验证操做
 
在安装其余服务以前确认身份认证服务的操做。
注解
在控制节点上执行这些命令。
 
[root@openstack1 ~]# unset OS_TOKEN OS_URL
[root@openstack1 ~]# openstack --os-auth-url http://10.88.66.15:35357/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
Password:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:08:28+00:00                                                                                                                                                               |
| id         | gAAAAABam6l8hb5pRSRCSHlQEXvcGHNT1OeSB4U2ggAsDnDYSm0audYmYpw-A_HxkA4udKZkC7q1pnomxRw7Eim2EAkLlicORtORw_OwdrSoEhgLKehUv-8l-x28vP1zFUHA8eB1EbJXCZuaqZ8HtdbgwE005jGryF1H9jqRTzOSs9GFhUPgg |
| project_id | e1f3d0d070534e14b2a538979b955bb7                                                                                                                                                        |
| user_id    | a178e2b2c5234e2389c574b3474b5cc2                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
 
Getting here means keystone is working.
[root@openstack1 ~]# openstack --os-auth-url http://10.88.66.15:35357/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name demo --os-username demo token issue 
Password:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:10:43+00:00                                                                                                                                                               |
| id         | gAAAAABam6oDRhGnlNikctPM2eeu-jZ7DkYRvl_KVD0AKww5yAjIb1soyORUxZ4Ga1V6N-Jl-CGttoNFNBO0Q7vtpIX9pTALXG7wbGjfVNIuWdnNblqf9pDNaPrQ97BS2TPIRw8lBEwsEynDi_j_3ogd0Uu0W5vhgaIGopj65R9h5t76QqiGRl8 |
| project_id | 63c27b21277a46dc90cf9bc50521f511                                                                                                                                                        |
| user_id    | d53e2219188c4438b27f055d8d5cacc8                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
 
 
Create OpenStack client environment scripts
Create client environment variable scripts for the admin and demo projects and users:
[root@openstack1 ~]# vim admin-openstack.sh
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://10.88.66.15:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
 
 
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack token issue
+------------+------------------------------------------------------------------------------------------+
| Field      | Value                                                                                    |
+------------+------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:15:04+00:00                                                                |
| id         | gAAAAABam6sIQEo1XKq9ggR6wujmNy55yzjn1B8We9e_CQmfiyiFegTgSiVIpst45QmpAZEiRKk6CzltBKMP5xBi |
|            | AHfCUobdArDXYUoOTU_jJAvdhGZ05Ae8qD2Fny1Bh4eSRKgDpWMPJY_FzBNSGtIwGO2LQ42rXbw0TykvOQyoZGJM |
|            | X9QGmko                                                                                  |
| project_id | e1f3d0d070534e14b2a538979b955bb7                                                         |
| user_id    | a178e2b2c5234e2389c574b3474b5cc2                                                         |
+------------+------------------------------------------------------------------------------------------+
 
 
[root@openstack1 ~]# vim demo-openstack.sh           
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://10.88.66.15:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
 
[root@openstack1 ~]# source demo-openstack.sh     
[root@openstack1 ~]# openstack token issue   
+------------+------------------------------------------------------------------------------------------+
| Field      | Value                                                                                    |
+------------+------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:17:29+00:00                                                                |
| id         | gAAAAABam6uZwCBZV717q4oFuTSJoZ_lFtBz2AS351y7UUbSfFR4FNnwfZl_xkKgvDE2HLyq4NTiPIbFziAaDWya |
|            | Tpm1p2U_yPfY8X1CI8CJ2lYV2qdMSFjEnHWjB4lNNeKTU4xocCRLGZCMFqOZX5eYjxXavbyATMyJnIfYFC1AD9fR |
|            | gWTUz4k                                                                                  |
| project_id | 63c27b21277a46dc90cf9bc50521f511                                                         |
| user_id    | d53e2219188c4438b27f055d8d5cacc8                                                         |
+------------+------------------------------------------------------------------------------------------+
 
 
Chapter 4: OpenStack Image Service Glance

Glance consists of three main parts: glance-api, glance-registry, and the image store.
glance-api: accepts requests to create, delete, and read cloud images
glance-registry: the image registry service of the cloud
 
1. Prerequisites
Create the glance service:
source admin-openstack.sh
openstack service create --name glance --description "OpenStack Image service" image
 
Create the Image service API endpoints:
openstack endpoint create --region RegionOne   image public http://10.88.66.15:9292
openstack endpoint create --region RegionOne   image internal http://10.88.66.15:9292
openstack endpoint create --region RegionOne   image admin http://10.88.66.15:9292
 
[root@openstack1 ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                          |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| 52912cb0e8db41b181b7ba219651dc87 | RegionOne | glance       | image        | True    | internal  | http://10.88.66.15:9292      |
| 645ce0753cda48dd8747f2db25b5fb26 | RegionOne | glance       | image        | True    | admin     | http://10.88.66.15:9292      |
| 790c71a14102407ca6b441853be6cf5b | RegionOne | glance       | image        | True    | public    | http://10.88.66.15:9292      |
| 84dda84836094b2494c3478406756373 | RegionOne | keystone     | identity     | True    | internal  | http://10.88.66.15:5000/v3/  |
| 9050f9fec5b1484684380e5adb32746f | RegionOne | keystone     | identity     | True    | admin     | http://10.88.66.15:35357/v3/ |
| de71ff11b10d4ab094d44dc58af7c68e | RegionOne | keystone     | identity     | True    | public    | http://10.88.66.15:5000/v3/  |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
 
 
2. Install and configure the components
Install glance:
 
[root@openstack1 ~]# yum install -y openstack-glance python-glance python-glanceclient
 
 
Edit /etc/glance/glance-api.conf and /etc/glance/glance-registry.conf and complete the following actions:
In the [database] section, configure database access:
[database]
...
connection = mysql+pymysql://glance:glance@10.88.66.15/glance
 
 
Sync the database:
 
[root@openstack1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
 
Check whether the database synced:
 
[root@openstack1 ~]# mysql -uglance -pglance  -e "use glance;show tables;"
+----------------------------------+
| Tables_in_glance                 |
+----------------------------------+
| alembic_version                  |
| image_locations                  |
| image_members                    |
| image_properties                 |
| image_tags                       |
| images                           |
| metadef_namespace_resource_types |
| metadef_namespaces               |
| metadef_objects                  |
| metadef_properties               |
| metadef_resource_types           |
| metadef_tags                     |
| migrate_version                  |
| task_info                        |
| tasks                            |
+----------------------------------+
 
 
 
Hook glance-api.conf up to keystone:

Edit the [keystone_authtoken] and [paste_deploy] sections of /etc/glance/glance-api.conf to configure Identity service access:
 
 
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
 
[paste_deploy]
flavor = keystone
 
Note: since Newton the keystone auth API version has been bumped; raise the version in these settings accordingly, or `openstack image list` fails with HTTP 500. Every later keystone auth block needs the same treatment, but this will not be repeated.
A sample of the error:
[root@openstack1 ~]# openstack image list
Internal Server Error (HTTP 500)
 
In the [glance_store] section, configure the local filesystem store and the image file location:
 
[glance_store]
...
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
 
Tip: using NFS as the image file location
You can mount an NFS export directly at the image directory:
mount -t nfs 10.30.1.203:/data /var/lib/glance/images
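To make the NFS mount survive reboots, an fstab entry along these lines works (same export as in the example above):

echo '10.30.1.203:/data /var/lib/glance/images nfs defaults,_netdev 0 0' >> /etc/fstab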
 
Hook glance-registry.conf up to keystone:
Edit the [keystone_authtoken] and [paste_deploy] sections of /etc/glance/glance-registry.conf to configure Identity service access:
 
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
 
[paste_deploy]
...
flavor = keystone
 
 
Check whether /etc/glance/glance-api.conf and /etc/glance/glance-registry.conf match the following:
 
# grep -v '^#\|^$' /etc/glance/glance-api.conf
[DEFAULT]
[cors]
[cors.subdomain]
[database]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images
[image_format]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
 
 
# grep -v '^#\|^$' /etc/glance/glance-registry.conf
[DEFAULT]
[database]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
 
-------------------------------------------------------------------------------------------
 
Start the glance services and enable them at boot:
systemctl enable openstack-glance-api
systemctl enable openstack-glance-registry 
systemctl start openstack-glance-api 
systemctl start openstack-glance-registry
 
 
-------------------------------------------------------------------------------------------
 
Listening ports: registry 9191, api 9292
 
[root@openstack1 ~]# netstat -antup
tcp        0      0 0.0.0.0:9292            0.0.0.0:*               LISTEN      10845/python2              
tcp        0      0 0.0.0.0:9191            0.0.0.0:*               LISTEN      10864/python2 
-------------------------------------------------------------------------------------------
 
 
[root@openstack1 ~]#  glance image-list
+----+------+
| ID | Name |
+----+------+
+----+------+
 
If `glance image-list` shows the output above, glance was installed successfully.
 
 
Note: if you hit the error below, it is usually a mistake in the auth_uri or auth_url settings in /etc/glance/glance-api.conf or /etc/glance/glance-registry.conf. Before
Ocata, auth_uri=http://10.88.66.15:5000; from Ocata onward it is http://10.88.66.15:5000/v3.
 
[root@openstack1 ~]# openstack image list
Internal Server Error (HTTP 500)
 
 
Aside:
`glance image-list` and `openstack image list` do the same thing.
 
 
---------------------------------------------------------------------------------------------------
 
glance验证操做
 
下载源镜像:
 
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
openstack image create "cirros3.5"  --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | f8ab98ff5e73ebab884d80c9dc9c7290                     |
| container_format | bare                                                 |
| created_at       | 2019-02-18T02:48:39Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47/file |
| id               | 0b105463-a9a2-4f89-a4d7-75ae54dc4a47                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros3.5                                            |
| owner            | 6840d3aa8b814d9caa54432ce44471b6                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13267968                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2019-02-18T02:48:40Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+
 
 
 
 
------------------------------------------------------------------------------------------------
 
List the images:
 
[root@openstack1 ~]# openstack image list
+--------------------------------------+-----------+--------+
| ID                                   | Name      | Status |
+--------------------------------------+-----------+--------+
| 0b105463-a9a2-4f89-a4d7-75ae54dc4a47 | cirros3.5 | active |
+--------------------------------------+-----------+--------+
 
[root@openstack1 ~]# glance image-list        
glance image-list
+--------------------------------------+-----------+
| ID                                   | Name      |
+--------------------------------------+-----------+
| 0b105463-a9a2-4f89-a4d7-75ae54dc4a47 | cirros3.5 |
+--------------------------------------+-----------+
 
Image storage location:
[root@openstack1 ~]# find / -name "0b105463-a9a2-4f89-a4d7-75ae54dc4a47"
/var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
[root@openstack1 ~]# ls -lh /var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
-rw-r-----. 1 glance glance 13M Feb 18 10:48 /var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
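The glance checksum field is the MD5 of the stored file, so the upload can be integrity-checked against the value shown earlier (f8ab98ff5e73ebab884d80c9dc9c7290):

md5sum /var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47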
 
------------------------------------------------------------------------------------------------
 
Chapter 5: OpenStack Compute Service Nova

Nova controller node (the components an OpenStack VM cannot do without: keystone, glance, nova, neutron)

API: receives and responds to external requests; supports the OpenStack API and the EC2 API
Cert: handles identity certification
Scheduler: schedules instances onto hosts
Conductor: middleware for compute nodes to access the database
Consoleauth: authorization for console access
Novncproxy: VNC proxy
The Nova API component implements the RESTful API and is the only way to reach Nova from outside.

It receives external requests and passes them to the other service components via the Message Queue; since it is also EC2 API compatible, EC2 management tools can be used for day-to-day Nova administration.

The Nova Scheduler module decides which host (compute node) a virtual machine is created on.
Scheduling a VM onto a physical node takes two steps:

         filtering (Filter)             weighing (Weight)

The Filter Scheduler first obtains the unfiltered host list, then selects the compute hosts that match the filter properties.
After host filtering, the remaining hosts are assigned weights, and one host is chosen per requested VM according to the weighing policy.
 
1. Prerequisites
[root@openstack1 ~]# source admin-openstack.sh
 
 
Create the nova and placement services and their endpoints:
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://10.88.66.15:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://10.88.66.15:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://10.88.66.15:8774/v2.1
 
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://10.88.66.15:8778
openstack endpoint create --region RegionOne placement internal http://10.88.66.15:8778
openstack endpoint create --region RegionOne placement admin http://10.88.66.15:8778
 
2. Deploy the Nova controller node on openstack1
First, deploy every required service except nova-compute on the controller node.
Install the Nova controller packages:
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
In the ``[DEFAULT]`` section, enable only the compute and metadata APIs:
[DEFAULT]
...
enabled_apis = osapi_compute,metadata
In the ``[api_database]`` and ``[database]`` sections, configure database access:
[api_database]
...
connection = mysql+pymysql://nova:nova@10.88.66.15/nova_api
[database]
...
connection = mysql+pymysql://nova:nova@10.88.66.15/nova
 
In the [DEFAULT] section, configure RabbitMQ message queue access via transport_url (before Pike this was done with rpc_backend in [DEFAULT] plus an [oslo_messaging_rabbit] section, shown commented out below):
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15

# rpc_backend = rabbit
# [oslo_messaging_rabbit]
# ...
# rabbit_host = 10.88.66.15
# rabbit_userid = openstack
# rabbit_password = openstack

 
In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:
[DEFAULT]
...
auth_strategy = keystone
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
 
 
Note
Comment out or remove any other options in [keystone_authtoken].
 
In the [DEFAULT] section, configure the my_ip option to use the management interface IP address of the controller node:
[DEFAULT] 
#... 
my_ip  =  10.88.66.15
 
Note: if you do not set the my_ip option, replace every $my_ip below with the controller's management interface IP.
In the [DEFAULT] section, enable support for the Networking service:
[DEFAULT]
...
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
Note
By default, Compute uses an internal firewall service. Since the Networking service includes one, disable the Compute built-in firewall by setting the ``nova.virt.firewall.NoopFirewallDriver`` driver.
In the ``[vnc]`` section, configure the VNC proxy to use the management interface IP address of the controller node:
 
[vnc]
...
enabled  =  true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
 
In the [glance] section, configure the location of the Image service API:
[glance]
...
api_servers = http://10.88.66.15:9292
In the [oslo_concurrency] section, configure the lock path:
[oslo_concurrency]
...
lock_path = /var/lib/nova/tmp
 
In the [placement] section, configure the Placement API:
 
[placement]
# auth_uri = http://10.88.66.15:5000/v3
# auth_url = http://10.88.66.15:35357/v3
# memcached_servers = 10.88.66.15:11211
# os_region_name = RegionOne
# project_domain_name = default
# project_name = service
# user_domain_name = default
# username = placement
# password = placement
# auth_type = password
 
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
 
The resulting nova.conf:
 
# grep -v "^#\|^$"  /etc/nova/nova.conf
[DEFAULT]
my_ip  =  10.88.66.15
transport_url = rabbit://openstack:openstack@10.88.66.15
auth_strategy = keystone
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
[api]
[api_database]
connection = mysql+pymysql://nova:nova@10.88.66.15/nova_api
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:nova@10.88.66.15/nova
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled  =  true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
A second, more heavily tuned nova.conf variant (debug logging, worker counts, scheduler filters, live migration) is included below for reference:

[DEFAULT]
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
compute_driver = libvirt.LibvirtDriver
osapi_compute_workers = 2
multi_host = True
enabled_apis = osapi_compute,metadata
osapi_compute_listen = 0.0.0.0
instance_name_template = instance-%08x
my_ip = 10.88.66.15
debug = True
instances_path = /var/lib/nova/instances
base_dir_name = _base
[api]
auth_strategy = keystone
[wsgi]
[scheduler]
workers = 2
driver = filter_scheduler
discover_hosts_in_cells_interval = 300
[filter_scheduler]
track_instance_changes = False
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter
[key_manager]
[database]
[api_database]
[glance]
api_servers = http://10.88.66.15:9292
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
project_domain_name = default
project_name = service
user_domain_name = default
password = nova
username = nova
auth_type = password
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
[conductor]
workers = 2
[cinder]
os_region_name = RegionOne
[libvirt]
live_migration_bandwidth = 0
live_migration_uri = qemu+ssh://stack@%s/system
virt_type = kvm
[placement]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
os_region_name = RegionOne
project_domain_name = default
project_name = service
user_domain_name = default
password = placement
username = placement
auth_type = password
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[neutron]
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
service_metadata_proxy = True
metadata_proxy_shared_secret = syscloud.cn
region_name = RegionOne
auth_strategy = keystone
project_domain_name = default
project_name = service
user_domain_name = default
password = neutron
username = neutron
auth_type = password
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.15
novncproxy_base_url = http://43.239.121.156:6080/vnc_auto.html
 
 
Due to a packaging bug, you must enable access to the Placement API by adding the following configuration to /etc/httpd/conf.d/00-nova-placement-api.conf:
 
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
 
Example 00-nova-placement-api.conf:
Listen 8778
 
<VirtualHost *:8778>
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
  WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
  WSGIScriptAlias / /usr/bin/nova-placement-api
  <IfVersion >= 2.4>
    ErrorLogFormat "%M"
  </IfVersion>
  ErrorLog /var/log/nova/nova-placement-api.log
  #SSLEngine On
  #SSLCertificateFile ...
  #SSLCertificateKeyFile ...
</VirtualHost>
 
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
  SetHandler wsgi-script
  Options +ExecCGI
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
</Location>
 
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
 
# cat /etc/httpd/conf.d/00-nova-placement-api.conf
Listen 0.0.0.0:8778
 
<VirtualHost *:8778>
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
  WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
  WSGIScriptAlias / /usr/bin/nova-placement-api
  <IfVersion >= 2.4>
    ErrorLogFormat "%M"
  </IfVersion>
  ErrorLog /var/log/nova/nova-placement-api.log
  #SSLEngine On
  #SSLCertificateFile ...
  #SSLCertificateKeyFile ...
<Directory />
    Options All
    AllowOverride All
    Require all granted
  </Directory>
 
 
  <Directory /usr/bin/nova-placement-api>
    Options All
    AllowOverride All
    Require all granted
  </Directory>
</VirtualHost>
 
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
  SetHandler wsgi-script
  Options +ExecCGI
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
</Location>
 
 
Restart the httpd service:
systemctl restart httpd
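A quick sanity check that the Placement API now answers on port 8778 (assuming curl is installed; it should return a small JSON version document rather than a 403 Forbidden):

# curl http://10.88.66.15:8778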
Populate the nova-api database:
su -s /bin/sh -c "nova-manage api_db sync" nova
 
Note
Ignore any deprecation messages in this output.
Register the cell0 database:
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
 
Create the cell1 cell:
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
 
Populate the nova database:
su -s /bin/sh -c "nova-manage db sync" nova
 
Verify that cell0 and cell1 are registered correctly:
# nova-manage cell_v2 list_cells
+-------+--------------------------------------+-------------------------------------+--------------------------------------------------+
|  Name |                 UUID                 |            Transport URL            |               Database Connection                |
+-------+--------------------------------------+-------------------------------------+--------------------------------------------------+
| cell0 | 00000000-0000-0000-0000-000000000000 |                none:/               | mysql+pymysql://nova:****@10.88.66.15/nova_cell0 |
| cell1 | 6e2b9b4b-cbd4-4f21-8d9d-fe2df37fb3e3 | rabbit://openstack:****@10.88.66.15 |    mysql+pymysql://nova:****@10.88.66.15/nova    |
+-------+--------------------------------------+-------------------------------------+--------------------------------------------------+
 
Check that the nova, nova_api, and nova_cell0 databases were populated successfully:
# mysql -unova -p'nova' -e "use nova_api;show tables;"
+------------------------------+
| Tables_in_nova_api           |
+------------------------------+
| aggregate_hosts              |
| aggregate_metadata           |
| aggregates                   |
| allocations                  |
.
.
.
| resource_provider_traits     |
| resource_providers           |
| traits                       |
| users                        |
+------------------------------+
# mysql -unova -p'nova' -e "use nova;show tables;"
+--------------------------------------------+
| Tables_in_nova                             |
+--------------------------------------------+
| agent_builds                               |
| aggregate_hosts                            |
| aggregate_metadata                         |
| aggregates                                 |
| allocations                                |
| block_device_mapping                       |
| bw_usage_cache                             |
| cells                                      |
| certificates                               |
| compute_nodes                              |
.
.
.
| shadow_volume_usage_cache                  |
| snapshot_id_mappings                       |
| snapshots                                  |
| tags                                       |
| task_log                                   |
| virtual_interfaces                         |
| volume_id_mappings                         |
| volume_usage_cache                         |
+--------------------------------------------+
# mysql -unova -p'nova' -e "use nova_cell0;show tables;"
+--------------------------------------------+
| Tables_in_nova_cell0                       |
+--------------------------------------------+
| agent_builds                               |
| aggregate_hosts                            |
| aggregate_metadata                         |
| aggregates                                 |
| allocations                                |
| block_device_mapping                       |
| bw_usage_cache                             |
| cells                                      |
.
.
.
| shadow_snapshots                           |
| shadow_task_log                            |
| shadow_virtual_interfaces                  |
| shadow_volume_id_mappings                  |
| shadow_volume_usage_cache                  |
| snapshot_id_mappings                       |
| snapshots                                  |
| tags                                       |
| task_log                                   |
| virtual_interfaces                         |
| volume_id_mappings                         |
| volume_usage_cache                         |
+--------------------------------------------+
 
 
Finish the installation
Start the Compute services and configure them to start at system boot:
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

(Append openstack-nova-compute.service to both commands only if the controller also doubles as a compute node; in this deployment, compute runs on openstack2.)
 
 
Nova changed substantially after the Newton release; reference documentation: https://docs.openstack.org/nova/pike/install/controller-install-rdo.html
------------------------------------------------------------------------------------------------
 
3. Nova compute node deployment (openstack2)
nova-compute normally runs on compute nodes; it receives requests over the message queue and manages the life cycle of VMs.
nova-compute manages KVM through libvirt, and Xen through the XenAPI.

The base packages for the compute node were installed in the opening section, so that step is not repeated here.
 
[root@openstack2 ~]# yum install -y openstack-nova-compute 
 
Edit the /etc/nova/nova.conf file and complete the following steps:
 
In the [DEFAULT] section, enable only the compute and metadata APIs:
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
 
In the [DEFAULT] section, configure the RabbitMQ message queue connection:
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15
 
Note: the rpc_backend option is no longer supported after the OpenStack Newton release.
In the [api] and [keystone_authtoken] sections, configure Identity service access:
[api]
...
auth_strategy = keystone
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
 
Note
Comment out or remove any other options in the [keystone_authtoken] section.
In the [DEFAULT] section, enable support for the Networking service:
[DEFAULT]
...
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
Note
By default, Compute uses its internal firewall service. Since Networking includes a firewall service, you must disable the Compute firewall by using the nova.virt.firewall.NoopFirewallDriver driver.
 
In the [vnc] section, enable and configure remote console access:
 
[vnc]
...
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://10.88.66.15:6080/vnc_auto.html 
 
The server component listens on all IP addresses, while the proxy component listens only on the management interface IP address of the compute node. The base URL indicates where you can use a web browser to access remote consoles of instances on this compute node.
 
Note
If the host running the web browser cannot resolve the controller host name, replace controller with the management interface IP address of the controller node.
In the [glance] section, configure the location of the Image service API:
 
[glance]
...
api_servers = http://10.88.66.15:9292
In the [oslo_concurrency] section, configure the lock path:
 
[oslo_concurrency]
...
lock_path = /var/lib/nova/tmp
 
In the [placement] section, configure the Placement API:

[placement]
...
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
 
 
 
 
[root@openstack2 ~]# grep -v '^#\|^$' /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@10.88.66.15
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://10.88.66.15:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
[root@openstack2 ~]# cat /etc/nova/nova.conf
[DEFAULT]
auth_strategy = keystone
use_neutron = True
compute_driver = libvirt.LibvirtDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver
[api]
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
live_migration_bandwidth = 0
#live_migration_uri = qemu+tcp://%s/system
live_migration_uri = qemu+ssh://stack@%s/system
#cpu_mode = none
virt_type = kvm
[matchmaker_redis]
[metrics]
[mks]
 
[neutron]
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
service_metadata_proxy = True
metadata_proxy_shared_secret = syscloud.cn
region_name = RegionOne
auth_strategy = keystone
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
 
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver = messagingv2
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
 
[libvirt]
live_migration_bandwidth = 0
#live_migration_uri = qemu+tcp://%s/system
live_migration_uri = qemu+ssh://stack@%s/system
#cpu_mode = none
virt_type = kvm
 
[placement]
 
os_region_name = RegionOne
project_domain_name = default
project_name = service
user_domain_name = default
password = placement
username = placement
auth_url = http://10.88.66.15:35357/v3
auth_type = password
 
 
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://43.239.121.156:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
 
 
Finish the installation
Determine whether your compute node supports hardware acceleration for virtual machines:
 
$ egrep -c '(vmx|svm)' /proc/cpuinfo
If this command returns a value of one or greater, your compute node supports hardware acceleration and no additional configuration is required.
 
If this command returns zero, your compute node does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.
Edit the [libvirt] section of /etc/nova/nova.conf as follows:
[libvirt]
...
virt_type = qemu
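To double-check what the hypervisor layer actually sees (useful in a nested-virtualization lab like this one), the libvirt client tools ship a validator; each QEMU check should report PASS when KVM acceleration is available:

# virt-host-validate qemu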
 
 
 
Start the Compute service and its dependencies, and configure them to start automatically at boot:
 
[root@openstack2 ~]# systemctl enable libvirtd.service openstack-nova-compute.service
[root@openstack2 ~]# systemctl start libvirtd.service openstack-nova-compute.service
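The new compute node must also be mapped into cell1 on the controller. With discover_hosts_in_cells_interval = 300 set in the controller's nova.conf (as shown earlier), this happens automatically within five minutes; to map it immediately, run on the controller:

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova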
 
 
Verify the deployment:
 
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | openstack1 | internal | enabled | up    | 2019-02-18T07:16:40.000000 |
|  2 | nova-scheduler   | openstack1 | internal | enabled | up    | 2019-02-18T07:16:39.000000 |
|  3 | nova-conductor   | openstack1 | internal | enabled | up    | 2019-02-18T07:16:39.000000 |
|  9 | nova-compute     | openstack2 | nova     | enabled | up    | 2019-02-18T07:16:34.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
 
 
Alternatively, verify with the following commands:
[root@openstack1 ~]# openstack host list
+------------+-------------+----------+
| Host Name  | Service     | Zone     |
+------------+-------------+----------+
| openstack1 | consoleauth | internal |
| openstack1 | scheduler   | internal |
| openstack1 | conductor   | internal |
| openstack2 | compute     | nova     |
+------------+-------------+----------+
 
[root@openstack1 ~]# nova service-list
+--------------------------------------+------------------+------------+----------+---------+-------+----------------------------+-----------------+-------------+
| Id                                   | Binary           | Host       | Zone     | Status  | State | Updated_at                 | Disabled Reason | Forced down |
+--------------------------------------+------------------+------------+----------+---------+-------+----------------------------+-----------------+-------------+
| 4790ca20-37c3-4fbf-92d1-72a7b584f6f6 | nova-consoleauth | openstack1 | internal | enabled | up    | 2019-02-18T07:19:10.000000 | -               | False       |
| 69a69d43-98c3-436e-866b-03d7944d4186 | nova-scheduler   | openstack1 | internal | enabled | up    | 2019-02-18T07:19:10.000000 | -               | False       |
| 14bb7cc2-0e80-4ef5-9f28-0775a69d7943 | nova-conductor   | openstack1 | internal | enabled | up    | 2019-02-18T07:19:09.000000 | -               | False       |
| b20775d6-213e-403d-bfc5-2a3c3f6438e1 | nova-compute     | openstack2 | nova     | enabled | up    | 2019-02-18T07:19:14.000000 | -               | False       |
+--------------------------------------+------------------+------------+----------+---------+-------+----------------------------+-----------------+-------------+
 
If these four services are listed, Nova was deployed successfully.
 
Verify the connection between Nova and Glance; output like the following indicates success:
[root@openstack1 ~]# openstack image list
+--------------------------------------+-----------+--------+
| ID                                   | Name      | Status |
+--------------------------------------+-----------+--------+
| 0b105463-a9a2-4f89-a4d7-75ae54dc4a47 | cirros3.5 | active |
+--------------------------------------+-----------+--------+
 
[root@openstack1 ~]# openstack image show 0b105463-a9a2-4f89-a4d7-75ae54dc4a47
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | f8ab98ff5e73ebab884d80c9dc9c7290                     |
| container_format | bare                                                 |
| created_at       | 2019-02-18T02:48:39Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47/file |
| id               | 0b105463-a9a2-4f89-a4d7-75ae54dc4a47                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros3.5                                            |
| owner            | 6840d3aa8b814d9caa54432ce44471b6                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13267968                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2019-02-18T02:48:40Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+
 
 
Note: as of the Newton release, the nova image-list command is no longer supported (it became glance image-list or openstack image list), so the commands above are the way to go.
 
 
 
The verification method officially recommended since Newton:
# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | openstack1 | internal | enabled | up    | 2019-02-18T07:21:30.000000 |
|  2 | nova-scheduler   | openstack1 | internal | enabled | up    | 2019-02-18T07:21:40.000000 |
|  3 | nova-conductor   | openstack1 | internal | enabled | up    | 2019-02-18T07:21:40.000000 |
|  9 | nova-compute     | openstack2 | nova     | enabled | up    | 2019-02-18T07:21:34.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
 
Verify the connection between Nova and Keystone; output like the following indicates success:
# openstack catalog list
+-----------+-----------+------------------------------------------+
| Name      | Type      | Endpoints                                |
+-----------+-----------+------------------------------------------+
| nova      | compute   | RegionOne                                |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                          |
| keystone  | identity  | RegionOne                                |
|           |           |   internal: http://10.88.66.15:5000/v3/  |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:35357/v3/    |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:5000/v3/    |
|           |           |                                          |
| placement | placement | RegionOne                                |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:8778        |
|           |           |                                          |
| glance    | image     | RegionOne                                |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:9292        |
|           |           |                                          |
+-----------+-----------+------------------------------------------+
 
# nova-status upgrade check
+---------------------------------------------------------------------+
| Upgrade Check Results                                               |
+---------------------------------------------------------------------+
| Check: Cells v2                                                     |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
| Check: Placement API                                                |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
| Check: Resource Providers                                           |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
 
 
 
Troubleshooting: openstack catalog list shows two keystone entries....
 
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  |                                             |
| keystone  | identity  | RegionOne                                   |
|           |           |   public: http://10.88.66.15:5000/v3/    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:5000/v3/  |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:35357/v3/    |
|           |           |                                             |
+-----------+-----------+---------------------------------------------+
 
 
 
Fix: delete the redundant keystone service.
[root@openstack1 ~]# openstack service list
+----------------------------------+-----------+-----------+
| ID                               | Name      | Type      |
+----------------------------------+-----------+-----------+
| 41455c0be35b4eea8fb7caeecbc2f23f | nova      | compute   |
| 9d948d63775a48a7a34ce104852f079f | placement | placement |
| b9065427be214b5bb5a80f62e4f03e6c | glance    | image     |
| bf5c6f371d3541a083a8bc7a4f4a91a5 | keystone  | identity  |
| d6101418a4b6409db2b4865fd5ae5c9c | keystone  | identity  |
+----------------------------------+-----------+-----------+
[root@openstack1 ~]# openstack service show  bf5c6f371d3541a083a8bc7a4f4a91a5
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | bf5c6f371d3541a083a8bc7a4f4a91a5 |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+
[root@openstack1 ~]# openstack service show  d6101418a4b6409db2b4865fd5ae5c9c
+---------+----------------------------------+
| Field   | Value                            |
+---------+----------------------------------+
| enabled | True                             |
| id      | d6101418a4b6409db2b4865fd5ae5c9c |
| name    | keystone                         |
| type    | identity                         |
+---------+----------------------------------+
 
Correct operation:
[root@openstack1 ~]# openstack service delete bf5c6f371d3541a083a8bc7a4f4a91a5
 
Wrong operation, shown for demonstration:
[root@openstack1 ~]# openstack service delete d6101418a4b6409db2b4865fd5ae5c9c
[root@openstack1 ~]# nova-status upgrade check
+-------------------------------------------------------------------+
| Upgrade Check Results                                             |
+-------------------------------------------------------------------+
| Check: Cells v2                                                   |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: Placement API                                              |
| Result: Failure                                                   |
| Details: Placement service credentials do not work.               |
+-------------------------------------------------------------------+
| Check: Resource Providers                                         |
| Result: Warning                                                   |
| Details: There are no compute resource providers in the Placement |
|   service but there are 2 compute nodes in the deployment.        |
|   This means no compute nodes are reporting into the              |
|   Placement service and need to be upgraded and/or fixed.         |
|   See                                                             |
|   http://docs.openstack.org/developer/nova/placement.html         |
|   for more details.                                               |
+-------------------------------------------------------------------+
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  |                                             |
+-----------+-----------+---------------------------------------------+
[root@openstack1 ~]# openstack service delete bf5c6f371d3541a083a8bc7a4f4a91a5
Failed to delete consumer with type, name or ID 'bf5c6f371d3541a083a8bc7a4f4a91a5': admin endpoint for identity service not found
1 of 1 services failed to delete.
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  |                                             |
+-----------+-----------+---------------------------------------------+
 
 
-----------------------------------------------------------------------------------------------------------------------------------------------
After the delete, this is what we ended up with. Don't panic and don't despair; after some analysis we found the fix.
[root@openstack1 ~]# openstack service list
admin endpoint for identity service not found
[root@openstack1 ~]# openstack endpoint list
admin endpoint for identity service not found
Re-bootstrap the Identity service:
keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://10.88.66.15:35357/v3/ --bootstrap-internal-url http://10.88.66.15:5000/v3/ --bootstrap-public-url http://10.88.66.15:5000/v3/ --bootstrap-region-id RegionOne
 
 
Check whether everything is back to normal:
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack service list
+----------------------------------+-----------+-----------+
| ID                               | Name      | Type      |
+----------------------------------+-----------+-----------+
| 41455c0be35b4eea8fb7caeecbc2f23f | nova      | compute   |
| 9d948d63775a48a7a34ce104852f079f | placement | placement |
| b9065427be214b5bb5a80f62e4f03e6c | glance    | image     |
| bf5c6f371d3541a083a8bc7a4f4a91a5 | keystone  | identity  |
+----------------------------------+-----------+-----------+
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  | RegionOne                                   |
|           |           |   public: http://10.88.66.15:5000/v3/    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:5000/v3/  |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:35357/v3/    |
|           |           |                                             |
+-----------+-----------+---------------------------------------------+
[root@openstack1 ~]# openstack user list
+----------------------------------+-----------+
| ID                               | Name      |
+----------------------------------+-----------+
| 02c2f92b32634152953918e7ca13d14d | nova      |
| 1a8166683c8d40aeba637c45a048df61 | glance    |
| 3a48f785d42949e4ab29787ba529df68 | admin     |
| 6693774dd0a545d09905461ea7ad3c84 | demo      |
| 8ff779da2c264adc92fc1b66a5c327a6 | placement |
| 9176221496ed45e6b34cbc6110f13e2b | neutron   |
+----------------------------------+-----------+
[root@openstack1 ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                          |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| 1333885c9bad499b94143429e3c38929 | RegionOne | glance       | image        | True    | internal  | http://10.88.66.15:9292      |
| 153b5834edf1475a9f99ced71bd92a0f | RegionOne | glance       | image        | True    | public    | http://10.88.66.15:9292      |
| 1b9f96da57fb403a863f9dd5aad39421 | RegionOne | placement    | placement    | True    | public    | http://10.88.66.15:8778      |
| 1ddbe1ec55a040d9a8561e3e5fe715fc | RegionOne | keystone     | identity     | True    | public    | http://10.88.66.15:5000/v3/  |
| 304c7f89985343b79bc3cadb94e87c64 | RegionOne | glance       | image        | True    | admin     | http://10.88.66.15:9292      |
| 5fcd1f1eb77c4d5c99101a20fe6326c0 | RegionOne | nova         | compute      | True    | admin     | http://10.88.66.15:8774/v2.1 |
| 68a4d1c7495f497fa40332145934316e | RegionOne | nova         | compute      | True    | internal  | http://10.88.66.15:8774/v2.1 |
| 833c55c98183435e82f57e759b177d76 | RegionOne | keystone     | identity     | True    | internal  | http://10.88.66.15:5000/v3/  |
| b0c08596c6744f0487f2b86b45360313 | RegionOne | keystone     | identity     | True    | admin     | http://10.88.66.15:35357/v3/ |
| ce8a5d83f2434b5fae7707dd593a7b58 | RegionOne | nova         | compute      | True    | public    | http://10.88.66.15:8774/v2.1 |
| d8b4085f5c3d4b31b1f9905bd8484edc | RegionOne | placement    | placement    | True    | admin     | http://10.88.66.15:8778      |
| df402d8d32d64ea18082dcbc8704a91d | RegionOne | placement    | placement    | True    | internal  | http://10.88.66.15:8778      |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
[root@openstack1 ~]#
 
 
 
Some similar errors, added here as a supplement:
2018-07-19 12:45:03,998 - util.py[WARNING]: 'http://169.254.169.254/2009-04-04/meta-data/instance-id' failed [114/120s]: url error [[Errno 111] Connection refused]
2018-07-19 12:45:11,005 - DataSourceEc2.py[CRITICAL]: giving up on md after 121 seconds
 
Root cause analysis:
nova-api on the compute node was not running. Track down the error through the logs:
root@node18:~# tail /var/log/nova/nova-api.log
2018-07-19 20:21:29 CRITICAL nova [-] No module named keystone.middleware.auth_token
2018-07-19 20:27:10 CRITICAL nova [-] No module named keystone.middleware.auth_token
Solution:
The compute node cannot find keystone; installing the keystone client packages fixes it.
yum install -y python-keystone python-keystoneclient
 
 
Error: Placement service credentials do not work.
 
[root@openstack2 ~]# cat /var/log/nova/nova-compute.log
2019-02-28 17:27:00.024 30253 WARNING nova.scheduler.client.report [req-e963d119-3730-4651-a057-cd7961cf43eb - - - - -] Placement service credentials do not work. Placement is optional in Newton, but required in Ocata. Please enable the placement service before upgrading.: Unauthorized: The request you have made requires authentication. (HTTP 401) (Request-ID: req-9b309a61-59a9-4188-8197-7fbb15d1f72e)
2019-02-28 17:27:00.871 30253 ERROR nova.compute.manager [req-e963d119-3730-4651-a057-cd7961cf43eb - - - - -] Error updating resources for node openstack2.: ResourceProviderCreationFailed: Failed to create resource provider openstack2
 
# nova-status upgrade check
+-------------------------------------------------------------------+
| Upgrade Check Results                                             |
+-------------------------------------------------------------------+
| Check: Cells v2                                                   |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: Placement API                                              |
| Result: Failure                                                   |
| Details: Placement service credentials do not work.               |
+-------------------------------------------------------------------+
| Check: Resource Providers                                         |
| Result: Warning                                                   |
| Details: There are no compute resource providers in the Placement |
|   service but there are 2 compute nodes in the deployment.        |
|   This means no compute nodes are reporting into the              |
|   Placement service and need to be upgraded and/or fixed.         |
|   See                                                             |
|   http://docs.openstack.org/developer/nova/placement.html         |
|   for more details.                                               |
+-------------------------------------------------------------------+
 
Fix: https://ask.openstack.org/en/question/105800/ocatathe-placement-api-endpoint-not-found-placement-is-optional-in-newton-but-required-in-ocata-please-enable-the-placement-service-before-upgrading/
 
Error: Cells v2 failure
[root@node2 ~]# nova-status upgrade check
Option "os_region_name" from group "placement" is deprecated. Use option "region-name" from group "placement".
+-------------------------------------------------------------------+
| Upgrade Check Results                                             |
+-------------------------------------------------------------------+
| Check: Cells v2                                                   |
| Result: Failure                                                   |
| Details: No host mappings found but there are compute nodes. Run  |
|   command 'nova-manage cell_v2 simple_cell_setup' and then        |
|   retry.                                                          |
+-------------------------------------------------------------------+
| Check: Placement API                                              |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: Resource Providers                                         |
| Result: Warning                                                   |
| Details: There are no compute resource providers in the Placement |
|   service but there are 2 compute nodes in the deployment.        |
|   This means no compute nodes are reporting into the              |
|   Placement service and need to be upgraded and/or fixed.         |
|   See                                                             |
|   https://docs.openstack.org/nova/latest/user/placement.html      |
|   for more details.                                               |
+-------------------------------------------------------------------+
| Check: Ironic Flavor Migration                                    |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: API Service Version                                        |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
 
Fix: run nova-manage cell_v2 simple_cell_setup
 
--------------------------------------------------------------------------------------------------------------------------------------------
 
 
Chapter 6: The OpenStack Networking service, Neutron
 
1. Prerequisites
Register the neutron network service:
 
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack service create --name neutron --description "OpenStack Networking" network
[root@openstack1 ~]# openstack endpoint create --region RegionOne network public http://10.88.66.15:9696
[root@openstack1 ~]# openstack endpoint create --region RegionOne network internal http://10.88.66.15:9696
[root@openstack1 ~]# openstack endpoint create --region RegionOne network admin http://10.88.66.15:9696
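The commands above assume the neutron service user already exists in Keystone. If it does not, create it first (using the password neutron that the configuration files below expect):

[root@openstack1 ~]# openstack user create --domain default --password neutron neutron
[root@openstack1 ~]# openstack role add --project service --user neutron admin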
 
2. Configure networking options

Deploy Neutron on the controller node (openstack1):
[root@openstack1 ~]# yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

Deploy Neutron on the compute node (openstack2):
[root@openstack2 ~]# yum install -y openstack-neutron-linuxbridge ebtables ipset


Since the Neutron configuration differs somewhat depending on the network model, the example below uses the most common one: a flat network with the Linuxbridge driver.
 
Neutron controller node configuration (openstack1)
Edit the /etc/neutron/neutron.conf file and complete the following steps:
 
In the [database] section, configure database access:
 
[database]
...
connection = mysql+pymysql://neutron:neutron@10.88.66.15/neutron
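The neutron database must exist before it can be synced. A minimal sketch, assuming root access to MariaDB and the password neutron used above:

mysql -u root -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';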
 
In the [DEFAULT] section, enable the Modular Layer 2 (ML2) plug-in and disable other plug-ins:
[DEFAULT]
...
core_plugin = ml2
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
 
In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:
 
[DEFAULT]
...
auth_strategy = keystone
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
 
In the [DEFAULT] section, configure the RabbitMQ message queue connection:
 
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15
 
 
 
Note
Comment out or remove any other options in the [keystone_authtoken] section.
In the [DEFAULT] and [nova] sections, configure Networking to notify Compute of network topology changes:
[DEFAULT]
...
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[nova]
...
memcached_servers = 10.88.66.15:11211
signing_dir = /var/cache/neutron
project_domain_name = Default
project_name = service
user_domain_name = Default
password = nova
username = nova
auth_url = http://10.88.66.15:35357/v3
auth_type = password
region_name = RegionOne
 
 
In the [oslo_concurrency] section, configure the lock path:
[oslo_concurrency]
...
lock_path = /var/lib/neutron/tmp
 
 
# grep -v "^#\|^$" /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
auth_strategy = keystone
transport_url = rabbit://openstack:openstack@10.88.66.15
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[agent]
[cors]
[database]
connection = mysql+pymysql://neutron:neutron@10.88.66.15/neutron
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
[matchmaker_redis]
[nova]
auth_url = http://10.88.66.15:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
A second, more heavily tuned neutron.conf variant is included below for reference:

[DEFAULT]
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
rpc_state_report_workers = 0
api_workers = 2
notify_nova_on_port_data_changes = True
notify_nova_on_port_status_changes = True
auth_strategy = keystone
allow_overlapping_ips = True
debug = True
core_plugin = ml2
bind_host = 0.0.0.0
use_syslog = False
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
[matchmaker_redis]
[nova]
memcached_servers = 10.88.66.15:11211
signing_dir = /var/cache/neutron
project_domain_name = Default
project_name = service
user_domain_name = Default
password = nova
username = nova
auth_url = http://10.88.66.15:35357/v3
auth_type = password
region_name = RegionOne
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
 
Configure the Modular Layer 2 (ML2) plug-in
The ML2 plug-in uses the Linuxbridge mechanism to build the layer-2 virtual networking infrastructure for instances.
 
Edit the /etc/neutron/plugins/ml2/ml2_conf.ini file and complete the following steps:
 
In the [ml2] section, enable the flat, VLAN, and tunnel (GRE/VXLAN/Geneve) type drivers:
 
[ml2]
...
type_drivers = flat,vlan,gre,vxlan,geneve
In the [ml2] section, configure VLAN as the tenant (self-service) network type:
 
[ml2]
...
tenant_network_types = vlan
In the [ml2] section, enable the Linuxbridge (and optionally Open vSwitch) mechanism drivers:
 
[ml2]
...
mechanism_drivers = linuxbridge,openvswitch
Warning

After you configure the ML2 plug-in, removing values from the type_drivers option can lead to database inconsistency.
In the [ml2] section, enable the port security extension driver:
[ml2]
...
extension_drivers = port_security
In the [ml2_type_flat] section, configure the provider virtual network as a flat network:
[ml2_type_flat]
...
flat_networks = external
In the [securitygroup] section, enable ipset to improve the efficiency of security group rules:
[securitygroup]
...
enable_ipset = true
 
 
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types = vlan
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = external
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
network_vlan_ranges = default:1:4000,external:1:4000
[ml2_type_vxlan]
[securitygroup]
enable_ipset = true
 
A second capture of the same file (here type_drivers also includes local):
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
tenant_network_types = vlan
extension_drivers = port_security
mechanism_drivers = linuxbridge
type_drivers = local,flat,vlan,gre,vxlan,geneve
[ml2_type_flat]
flat_networks = external
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
network_vlan_ranges = default:1:4000,external:1:4000
[ml2_type_vxlan]
[securitygroup]
enable_ipset = True
 
Configure the Linuxbridge agent
The Linuxbridge agent builds the layer-2 virtual networking for instances and handles security group rules.
 
Edit the /etc/neutron/plugins/ml2/linuxbridge_agent.ini file and complete the following steps:
 
In the [linux_bridge] section, map the provider virtual networks to the provider physical network interfaces:
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
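The mapped interfaces must actually exist on the node; a quick check:

ip link show eth1
ip link show eth2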
 
 
In the [vxlan] section, disable VXLAN overlay networking:
 
[vxlan]
enable_vxlan = false
In the [securitygroup] section, enable security groups and configure the Linuxbridge iptables firewall driver:
[securitygroup]
...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
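The upstream install guide also requires that kernel bridge netfilter support is enabled so that security group rules actually take effect; both sysctl values should report 1 (load the br_netfilter module first if the keys are missing):

modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables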
 
 
Configure the DHCP agent
The DHCP agent provides DHCP services for virtual networks.
 
Edit the /etc/neutron/dhcp_agent.ini file and complete the following steps:
 
In the [DEFAULT] section, configure the Linuxbridge interface driver and the Dnsmasq DHCP driver, and enable isolated metadata so that instances on provider networks can reach the metadata service over the network:
 
[DEFAULT]
...
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
 
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
[agent]
[ovs]
 
 
 
 
Configure the metadata agent
The metadata agent provides configuration information, such as credentials, to instances.
Edit the /etc/neutron/metadata_agent.ini file and complete the following steps:
In the [DEFAULT] section, configure the metadata host and the shared secret:
[DEFAULT]
...
nova_metadata_ip = 10.88.66.15
metadata_proxy_shared_secret = syscloud.cn
 
# grep -v '^#\|^$' /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = 10.88.66.15
metadata_proxy_shared_secret = syscloud.cn
[agent]
[cache]
 
 
 
Configure the L3 agent
 
# grep -v '^#\|^$' /etc/neutron/l3_agent.ini
[DEFAULT]
ovs_use_veth = False
interface_driver = linuxbridge
debug = True
[agent]
[ovs]
 
 
 
 
Configure the Networking service on the compute node
Edit the /etc/nova/nova.conf file and complete the following steps:

In the [neutron] section, configure access parameters, enable the metadata proxy, and set the shared secret:
 
 
[neutron]
...
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = syscloud.cn
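After editing nova.conf on the compute node, restart the Compute service so the [neutron] settings take effect:

[root@openstack2 ~]# systemctl restart openstack-nova-compute.service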
 
 
Finish the installation
The Networking service initialization scripts expect a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini. If the symlink does not exist, create it with the following command:
 
[root@openstack1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
 
Populate the database:
 
[root@openstack1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
Note

Database population occurs this late in the Networking setup because the script requires the completed server and plug-in configuration files.
 
Restart the Compute API service:
 
[root@openstack1 ~]# systemctl restart openstack-nova-api.service
Start the Networking services and configure them to start at system boot.
 
For both networking options:
 
[root@openstack1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@openstack1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
For networking option 2, also enable the layer-3 service and configure it to start at boot:
 
[root@openstack1 ~]# systemctl enable neutron-l3-agent.service
[root@openstack1 ~]# systemctl start neutron-l3-agent.service
 
 
Verify that Neutron is working on the controller node:
[root@openstack1 ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 36134331-0c29-4eaa-b287-93e69836d419 | DHCP agent         | openstack1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 67b10d2b-2438-40e1-8402-70219cd5100c | Metadata agent     | openstack1 | None              | :-)   | UP    | neutron-metadata-agent    |
| 6e40171c-6be3-49a7-93d0-ee54ce831025 | Linux bridge agent | openstack1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 7fbb4072-6358-4cf6-8b6e-9631bb0c9eac | L3 agent           | openstack1 | nova              | :-)   | UP    | neutron-l3-agent          |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
 
 
 
Final verification: list the loaded Networking extensions:
[root@openstack1 ~]# openstack extension list --network
+----------------------------------------------------------------------------------------------+---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
| Name                                                                                         | Alias                     | Description                                                                                                                                              |
+----------------------------------------------------------------------------------------------+---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
| Default Subnetpools                                                                          | default-subnetpools       | Provides ability to mark and use a subnetpool as the default                                                                                             |
| Network IP Availability                                                                      | network-ip-availability   | Provides IP availability data for each network and subnet.                                                                                               |
| Network Availability Zone                                                                    | network_availability_zone | Availability zone support for network.                                                                                                                   |
| Network MTU (writable)                                                                       | net-mtu-writable          | Provides a writable MTU attribute for a network resource.                                                                                                |
| Port Binding                                                                                 | binding                   | Expose port bindings of a virtual port to external application                                                                                           |
| agent                                                                                        | agent                     | The agent management extension.                                                                                                                          |
| Subnet Allocation                                                                            | subnet_allocation         | Enables allocation of subnets from a subnet pool                                                                                                         |
| DHCP Agent Scheduler                                                                         | dhcp_agent_scheduler      | Schedule networks among dhcp agents                                                                                                                      |
| Tag support                                                                                  | tag                       | Enables to set tag on resources.                                                                                                                         |
| Neutron external network                                                                     | external-net              | Adds external network attribute to network resource.                                                                                                     |
| Neutron Service Flavors                                                                      | flavors                   | Flavor specification for Neutron advanced services                                                                                                       |
| Network MTU                                                                                  | net-mtu                   | Provides MTU attribute for a network resource.                                                                                                           |
| Availability Zone                                                                            | availability_zone         | The availability zone extension.                                                                                                                         |
| Quota management support                                                                     | quotas                    | Expose functions for quotas management per tenant                                                                                                        |
| Tag support for resources with standard attribute: trunk, policy, security_group, floatingip | standard-attr-tag         | Enables to set tag on resources with standard attribute.                                                                                                 |
| If-Match constraints based on revision_number                                                | revision-if-match         | Extension indicating that If-Match based on revision_number is supported.                                                                                |
| Provider Network                                                                             | provider                  | Expose mapping of virtual networks to physical networks                                                                                                  |
| Multi Provider Network                                                                       | multi-provider            | Expose mapping of virtual networks to multiple physical networks                                                                                         |
| Quota details management support                                                             | quota_details             | Expose functions for quotas usage statistics per project                                                                                                 |
| Address scope                                                                                | address-scope             | Address scopes extension.                                                                                                                                |
| Subnet service types                                                                         | subnet-service-types      | Provides ability to set the subnet service_types field                                                                                                   |
| Resource timestamps                                                                          | standard-attr-timestamp   | Adds created_at and updated_at fields to all Neutron resources that have Neutron standard attributes.                                                    |
| Neutron Service Type Management                                                              | service-type              | API for retrieving service providers for Neutron advanced services                                                                                       |
| Tag support for resources: subnet, subnetpool, port, router                                  | tag-ext                   | Extends tag support to more L2 and L3 resources.                                                                                                         |
| Neutron Extra DHCP options                                                                   | extra_dhcp_opt            | Extra options configuration for DHCP. For example PXE boot options to DHCP clients can be specified (e.g. tftp-server, server-ip-address, bootfile-name) |
| Resource revision numbers                                                                    | standard-attr-revisions   | This extension will display the revision number of neutron resources.                                                                                    |
| Pagination support                                                                           | pagination                | Extension that indicates that pagination is enabled.                                                                                                     |
| Sorting support                                                                              | sorting                   | Extension that indicates that sorting is enabled.                                                                                                        |
| security-group                                                                               | security-group            | The security groups extension.                                                                                                                           |
| RBAC Policies                                                                                | rbac-policies             | Allows creation and modification of policies that control tenant access to resources.                                                                    |
| standard-attr-description                                                                    | standard-attr-description | Extension to add descriptions to standard attributes                                                                                                     |
| Port Security                                                                                | port-security             | Provides port security                                                                                                                                   |
| Allowed Address Pairs                                                                        | allowed-address-pairs     | Provides allowed address pairs                                                                                                                           |
| project_id field enabled                                                                     | project-id                | Extension that indicates that project_id field is enabled.                                                                                               |
+----------------------------------------------------------------------------------------------+---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
 
 
Neutron compute node configuration: openstack2

Neutron compute node (copy the neutron configuration file over to the compute node):

Edit the /etc/neutron/neutron.conf file and complete the following actions:

In the [database] section, comment out any connection options, because compute nodes do not directly access the database.

In the [DEFAULT] section, configure RabbitMQ message queue access:
 
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15
 
In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:
 
[DEFAULT]
...
auth_strategy = keystone
 
[keystone_authtoken]
...
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
 
In the [oslo_concurrency] section, configure the lock path:
 
[oslo_concurrency]
...
lock_path = /var/lib/neutron/tmp
 
 
# grep -v '^#\|^$' /etc/neutron/neutron.conf
[DEFAULT]
auth_strategy = keystone
transport_url = rabbit://openstack:openstack@10.88.66.15
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
Configure networking options
Choose the same networking option that you chose on the controller node. Afterwards, return here and proceed to the next step: configuring the Compute service to use the Networking service.
 
Configure the Linux bridge agent
The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups.
 
Edit the /etc/neutron/plugins/ml2/linuxbridge_agent.ini file and complete the following actions:
 
In the [linux_bridge] section, map the provider virtual networks to the provider physical network interfaces:
 
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
Replace PROVIDER_INTERFACE_NAME with the name of the underlying provider physical network interface (eth1 and eth2 here). See the host networking section for more information.
 
In the [vxlan] section, disable VXLAN overlay networks:
 
[vxlan]
enable_vxlan = false
In the [securitygroup] section, enable security groups and configure the Linux bridge iptables firewall driver:
 
[securitygroup]
...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 
 
[root@openstack2 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
 
Configure the Compute service to use the Networking service
Edit the /etc/nova/nova.conf file and complete the following actions:

In the [neutron] section, configure access parameters:
 
[neutron]
...
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
 
[root@openstack2 ~]#  grep -v "^#\|^$"  /etc/nova/nova.conf
[DEFAULT]
auth_strategy = keystone
use_neutron = True
compute_driver = libvirt.LibvirtDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver
[api]
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
inject_password = true
inject_partition = -1
live_migration_bandwidth = 0
live_migration_uri = qemu+ssh://stack@%s/system
virt_type = kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
service_metadata_proxy = True
metadata_proxy_shared_secret = syscloud.cn
region_name = RegionOne
auth_strategy = keystone
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver = messagingv2
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = default
project_name = service
user_domain_name = default
password = placement
username = placement
auth_url = http://10.88.66.15:35357/v3
auth_type = password
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://10.88.66.15:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
 
 
 
Finalize the installation
Restart the Compute service:
 
[root@openstack2 ~]# systemctl restart openstack-nova-compute.service
Start the Linux bridge agent and configure it to start when the system boots:
 
[root@openstack2 ~]# systemctl enable neutron-linuxbridge-agent.service
[root@openstack2 ~]# systemctl start neutron-linuxbridge-agent.service
 
Verify that neutron is working on the compute node (run from the controller):
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 36134331-0c29-4eaa-b287-93e69836d419 | DHCP agent         | openstack1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 67b10d2b-2438-40e1-8402-70219cd5100c | Metadata agent     | openstack1 | None              | :-)   | UP    | neutron-metadata-agent    |
| 6e40171c-6be3-49a7-93d0-ee54ce831025 | Linux bridge agent | openstack1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 7fbb4072-6358-4cf6-8b6e-9631bb0c9eac | L3 agent           | openstack1 | nova              | :-)   | UP    | neutron-l3-agent          |
| c5fbf4e0-0d72-40b0-bb53-c383883a0d19 | Linux bridge agent | openstack2 | None              | :-)   | UP    | neutron-linuxbridge-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
 
This shows that the Linux bridge agent on compute node openstack2 has successfully connected to the controller.
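
Since the nova.conf above sets discover_hosts_in_cells_interval = 300, new compute hosts are discovered periodically; to register openstack2 immediately instead of waiting, you can run the discovery by hand (my own addition, run on the controller):

[root@openstack1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova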
 
 
----------------------------------------------------------------------------------------------------------------
 
 
 
Chapter 7: The OpenStack dashboard service (Horizon)
 
Install the packages:
 
# yum install openstack-dashboard -y
Edit the /etc/openstack-dashboard/local_settings file and complete the following actions:
 
Configure the dashboard to use OpenStack services on the controller node:
 
OPENSTACK_HOST = "10.88.66.15"
Allow all hosts to access the dashboard:
 
ALLOWED_HOSTS = ['*', ]
Configure the session storage service (memcached for the cache, file-backed sessions):
 
#SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': '10.88.66.15:11211',
    }
}
 
 
 
Enable the Identity API version 3:
 
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
Enable support for domains:
 
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
Configure the API versions:
 
OPENSTACK_API_VERSIONS = {
     "identity": 3,
     "volume": 2,
     "image": 2,
     "compute": 2,
}
Configure default as the default domain for users created via the dashboard:
 
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
Configure user as the default role for users created via the dashboard:
 
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
If you chose networking option 1, disable support for layer-3 networking services; this deployment uses option 2 (the L3 agent), so they are left enabled:
 
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': True,
    'enable_ha_router': True,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
}
Optionally, configure the time zone:
 
TIME_ZONE = "Asia/Shanghai"
 
 
 
 
Final configuration example:
# grep -v '#\|^$' /etc/openstack-dashboard/local_settings
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = False
WEBROOT = '/dashboard/'
ALLOWED_HOSTS = ['*', ]
OPENSTACK_API_VERSIONS = {
     "identity": 3,
     "volume": 2,
     "image": 2,
     "compute": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
LOCAL_PATH = '/tmp'
SECRET_KEY='3f508e8a4399dffa3323'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '10.88.66.15:11211',
    },
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "10.88.66.15"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True,
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    'can_set_password': True,
    'requires_keypair': False,
    'enable_quotas': True
}
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': True,
}
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': True,
    'enable_ha_router': True,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
}
OPENSTACK_HEAT_STACK = {
    'enable_user_pass': True,
}
IMAGE_CUSTOM_PROPERTY_TITLES = {
    "architecture": _("Architecture"),
    "kernel_id": _("Kernel ID"),
    "ramdisk_id": _("Ramdisk ID"),
    "image_state": _("Euca2ools state"),
    "project_id": _("Project ID"),
    "image_type": _("Image Type"),
}
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
INSTANCE_LOG_LENGTH = 35
DROPDOWN_MAX_ITEMS = 30
TIME_ZONE = "Asia/Shanghai"
POLICY_FILES_PATH = '/etc/openstack-dashboard'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'operation': {
            'format': '%(asctime)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
        'operation': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'operation',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
        },
        'requests': {
            'handlers': ['null'],
            'propagate': False,
        },
        'horizon': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'horizon.operation_log': {
            'handlers': ['operation'],
            'level': 'INFO',
            'propagate': False,
        },
        'openstack_dashboard': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'novaclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'cinderclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'keystoneclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'glanceclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'neutronclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'heatclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'swiftclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'openstack_auth': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'nose.plugins.manager': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'django': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'iso8601': {
            'handlers': ['null'],
            'propagate': False,
        },
        'scss': {
            'handlers': ['null'],
            'propagate': False,
        },
    },
}
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': _('All TCP'),
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_udp': {
        'name': _('All UDP'),
        'ip_protocol': 'udp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_icmp': {
        'name': _('All ICMP'),
        'ip_protocol': 'icmp',
        'from_port': '-1',
        'to_port': '-1',
    },
    'ssh': {
        'name': 'SSH',
        'ip_protocol': 'tcp',
        'from_port': '22',
        'to_port': '22',
    },
    'smtp': {
        'name': 'SMTP',
        'ip_protocol': 'tcp',
        'from_port': '25',
        'to_port': '25',
    },
    'dns': {
        'name': 'DNS',
        'ip_protocol': 'tcp',
        'from_port': '53',
        'to_port': '53',
    },
    'http': {
        'name': 'HTTP',
        'ip_protocol': 'tcp',
        'from_port': '80',
        'to_port': '80',
    },
    'pop3': {
        'name': 'POP3',
        'ip_protocol': 'tcp',
        'from_port': '110',
        'to_port': '110',
    },
    'imap': {
        'name': 'IMAP',
        'ip_protocol': 'tcp',
        'from_port': '143',
        'to_port': '143',
    },
    'ldap': {
        'name': 'LDAP',
        'ip_protocol': 'tcp',
        'from_port': '389',
        'to_port': '389',
    },
    'https': {
        'name': 'HTTPS',
        'ip_protocol': 'tcp',
        'from_port': '443',
        'to_port': '443',
    },
    'smtps': {
        'name': 'SMTPS',
        'ip_protocol': 'tcp',
        'from_port': '465',
        'to_port': '465',
    },
    'imaps': {
        'name': 'IMAPS',
        'ip_protocol': 'tcp',
        'from_port': '993',
        'to_port': '993',
    },
    'pop3s': {
        'name': 'POP3S',
        'ip_protocol': 'tcp',
        'from_port': '995',
        'to_port': '995',
    },
    'ms_sql': {
        'name': 'MS SQL',
        'ip_protocol': 'tcp',
        'from_port': '1433',
        'to_port': '1433',
    },
    'mysql': {
        'name': 'MYSQL',
        'ip_protocol': 'tcp',
        'from_port': '3306',
        'to_port': '3306',
    },
    'rdp': {
        'name': 'RDP',
        'ip_protocol': 'tcp',
        'from_port': '3389',
        'to_port': '3389',
    },
}
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
                              'LAUNCH_INSTANCE_DEFAULTS',
                              'OPENSTACK_IMAGE_FORMATS',
                              'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN']
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
 
Finalize the installation
Restart the web server and session storage services:
 
[root@openstack1 ~]# systemctl restart httpd.service memcached.service
 
Verify operation of the dashboard.
 
In a browser, open http://10.88.66.15/dashboard or http://43.239.121.156/dashboard/ to access the dashboard.
 
Authenticate using the admin or demo user credentials and the default domain.
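
A quick reachability check of my own before opening a browser (expect a 200 or a redirect to the login page):

[root@openstack1 ~]# curl -sI http://10.88.66.15/dashboard/ | head -n 1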
 
 
 
Chapter 8: Creating the first OpenStack instance

Create the first virtual machine

1. Create a network:
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network provider --provider-network-type flat provider
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network public --provider-network-type flat public-net
 
 
[root@openstack1 ~]# openstack network list
+--------------------------------------+----------+--------------------------------------+
| ID                                   | Name     | Subnets                              |
+--------------------------------------+----------+--------------------------------------+
| 76ed28c8-4afd-41b7-ab9b-17cd8afa436e | provider | 22436418-64e1-47b8-aa48-01a0162a9ad9 |
+--------------------------------------+----------+--------------------------------------+
Create a subnet:
[root@openstack1 ~]# openstack subnet create --network provider --allocation-pool start=192.168.2.101,end=192.168.2.250 --dns-nameserver 114.114.114.114 --gateway 192.168.2.1 --subnet-range 192.168.2.0/24 provider
[root@openstack1 ~]# openstack subnet create --network public-net --allocation-pool start=192.168.56.20,end=192.168.56.200 --dns-nameserver 223.5.5.5 --gateway 192.168.56.1  --subnet-range 192.168.56.0/24 public-net
 
 
[root@openstack1 ~]# openstack subnet list    # list networks and subnets
+--------------------------------------+----------+--------------------------------------+----------------+
| ID                                   | Name     | Network                              | Subnet         |
+--------------------------------------+----------+--------------------------------------+----------------+
| 22436418-64e1-47b8-aa48-01a0162a9ad9 | provider | 76ed28c8-4afd-41b7-ab9b-17cd8afa436e | 192.168.2.0/24 |
+--------------------------------------+----------+--------------------------------------+----------------+
 
[root@openstack1 ~]# openstack subnet  show 22436418-64e1-47b8-aa48-01a0162a9ad9
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.2.101-192.168.2.250          |
| cidr                    | 192.168.2.0/24                       |
| created_at              | 2019-02-19T03:15:34Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.2.1                          |
| host_routes             |                                      |
| id                      | 22436418-64e1-47b8-aa48-01a0162a9ad9 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | provider                             |
| network_id              | 76ed28c8-4afd-41b7-ab9b-17cd8afa436e |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T03:15:34Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
Create the m1.nano flavor
The smallest default flavor requires 512 MB of RAM. For environments whose compute nodes have less than 4 GB of RAM, we recommend creating an m1.nano flavor that requires only 64 MB. For testing purposes only, use the m1.nano flavor to boot the CirrOS image.
 
[root@openstack1 ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
+----------------------------+---------+
| Field                      | Value   |
+----------------------------+---------+
| OS-FLV-DISABLED:disabled   | False   |
| OS-FLV-EXT-DATA:ephemeral  | 0       |
| disk                       | 1       |
| id                         | 0       |
| name                       | m1.nano |
| os-flavor-access:is_public | True    |
| properties                 |         |
| ram                        | 64      |
| rxtx_factor                | 1.0     |
| swap                       |         |
| vcpus                      | 1       |
+----------------------------+---------+
 
 
 
Generate a key pair
 
 
Source the demo tenant credentials:
[root@openstack1 ~]# source demo-openstack.sh
 
Generate and add a key pair:
[root@openstack1 ~]# ssh-keygen -q -N ""
Enter file in which to save the key (/root/.ssh/id_rsa):
[root@openstack1 ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
+-------------+-------------------------------------------------+
| Field       | Value                                           |
+-------------+-------------------------------------------------+
| fingerprint | 81:0d:c4:d2:7f:ea:ae:47:fd:c9:70:d4:98:cf:9e:cd |
| name        | mykey                                           |
| user_id     | 875898ea26d742e58161f248fd954752                |
+-------------+-------------------------------------------------+
 
[root@openstack1 ~]# openstack keypair list
+-------+-------------------------------------------------+
| Name  | Fingerprint                                     |
+-------+-------------------------------------------------+
| mykey | 81:0d:c4:d2:7f:ea:ae:47:fd:c9:70:d4:98:cf:9e:cd |
+-------+-------------------------------------------------+
 
 
Add security group rules

Add rules to the default security group.

Permit ICMP (ping):
 
[root@openstack1 ~]# openstack security group rule create --protocol icmp default
+-------------------+--------------------------------------+
| Field             | Value                                |
+-------------------+--------------------------------------+
| created_at        | 2018-03-06T04:35:19Z                 |
| description       |                                      |
| direction         | ingress                              |
| ethertype         | IPv4                                 |
| headers           |                                      |
| id                | 2ef58ebf-b582-4f99-9936-184c40abe5fc |
| port_range_max    | None                                 |
| port_range_min    | None                                 |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| protocol          | icmp                                 |
| remote_group_id   | None                                 |
| remote_ip_prefix  | 0.0.0.0/0                            |
| revision_number   | 1                                    |
| security_group_id | 7e05baa2-3b19-42c5-85f6-c9f215ca28ce |
| updated_at        | 2018-03-06T04:35:19Z                 |
+-------------------+--------------------------------------+
 
Permit secure shell (SSH) access:
[root@openstack1 ~]# openstack security group rule create --protocol tcp --dst-port 22 default
+-------------------+--------------------------------------+
| Field             | Value                                |
+-------------------+--------------------------------------+
| created_at        | 2018-03-06T04:36:48Z                 |
| description       |                                      |
| direction         | ingress                              |
| ethertype         | IPv4                                 |
| headers           |                                      |
| id                | 8b099d42-875a-45e6-bf2e-da3780b5453d |
| port_range_max    | 22                                   |
| port_range_min    | 22                                   |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| protocol          | tcp                                  |
| remote_group_id   | None                                 |
| remote_ip_prefix  | 0.0.0.0/0                            |
| revision_number   | 1                                    |
| security_group_id | 7e05baa2-3b19-42c5-85f6-c9f215ca28ce |
| updated_at        | 2018-03-06T04:36:48Z                 |
+-------------------+--------------------------------------+
 
 
 
 
 
Determine instance options
To launch an instance, you must specify at least a flavor, an image name, a network, a security group, a key pair, and an instance name.
 
List available flavors:
[root@openstack1 ~]# openstack flavor list
+----+---------+-----+------+-----------+-------+-----------+
| ID | Name    | RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+---------+-----+------+-----------+-------+-----------+
| 0  | m1.nano |  64 |    1 |         0 |     1 | True      |
+----+---------+-----+------+-----------+-------+-----------+
 
List available images:
 
[root@openstack1 ~]# openstack image list
+--------------------------------------+-----------------------+--------+
| ID                                   | Name                  | Status |
+--------------------------------------+-----------------------+--------+
| 64b3a04f-0d1f-4dee-8604-5f33d7769d22 | CentOS-6.9-x86_64-min | active |
| d464af77-9588-43e7-a3d4-3f5f26000030 | cirros                | active |
+--------------------------------------+-----------------------+--------+
 
 
List available networks:
 
[root@openstack1 ~]# openstack network list
+--------------------------------------+------------+--------------------------------------+
| ID                                   | Name       | Subnets                              |
+--------------------------------------+------------+--------------------------------------+
| cab15ce1-cf69-4fbd-ba1f-88f525e98e0f | public-net | d632d023-1911-4ad3-b806-1bf8a7089771 |
+--------------------------------------+------------+--------------------------------------+
 
 
List available security groups:
 
[root@openstack1 ~]# openstack security group list
+--------------------------------------+---------+------------------------+----------------------------------+
| ID                                   | Name    | Description            | Project                          |
+--------------------------------------+---------+------------------------+----------------------------------+
| 7e05baa2-3b19-42c5-85f6-c9f215ca28ce | default | Default security group | e0c3f7cb756c4ab79cf92fc99d8f073b |
+--------------------------------------+---------+------------------------+----------------------------------+
 
 
Launch the instance:
[root@openstack1 ~]# openstack server create --flavor m1.nano --image cirros --nic net-id=cab15ce1-cf69-4fbd-ba1f-88f525e98e0f --security-group default --key-name mykey provider-instance
+--------------------------------------+-----------------------------------------------+
| Field                                | Value                                         |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                        |
| OS-EXT-AZ:availability_zone          |                                               |
| OS-EXT-STS:power_state               | NOSTATE                                       |
| OS-EXT-STS:task_state                | scheduling                                    |
| OS-EXT-STS:vm_state                  | building                                      |
| OS-SRV-USG:launched_at               | None                                          |
| OS-SRV-USG:terminated_at             | None                                          |
| accessIPv4                           |                                               |
| accessIPv6                           |                                               |
| addresses                            |                                               |
| adminPass                            | PLftyqaoyC86                                  |
| config_drive                         |                                               |
| created                              | 2018-03-06T05:02:50Z                          |
| flavor                               | m1.nano (0)                                   |
| hostId                               |                                               |
| id                                   | d8517ee2-4b40-4128-b087-cd76efcdd78c          |
| image                                | cirros (d464af77-9588-43e7-a3d4-3f5f26000030) |
| key_name                             | mykey                                         |
| name                                 | provider-instance                             |
| os-extended-volumes:volumes_attached | []                                            |
| progress                             | 0                                             |
| project_id                           | e0c3f7cb756c4ab79cf92fc99d8f073b              |
| properties                           |                                               |
| security_groups                      | [{u'name': u'default'}]                       |
| status                               | BUILD                                         |
| updated                              | 2018-03-06T05:02:51Z                          |
| user_id                              | 875898ea26d742e58161f248fd954752              |
+--------------------------------------+-----------------------------------------------+
Check the status of the instance:
 
[root@openstack1 ~]# openstack server list
+--------------------------------------+-------------------+--------+--------------------------+------------+
| ID                                   | Name              | Status | Networks                 | Image Name |
+--------------------------------------+-------------------+--------+--------------------------+------------+
| 94d530ed-5baf-41a1-bd6a-323d55abe65b | provider-instance | ACTIVE | public-net=192.168.56.24 | cirros     |
+--------------------------------------+-------------------+--------+--------------------------+------------+
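
Before logging in over SSH, a check of my own: verify reachability from the controller (this assumes the controller has an address on 192.168.56.0/24) and that the ICMP rule added above works:

[root@openstack1 ~]# ping -c 3 192.168.56.23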
 
[root@openstack1 ~]# ssh cirros@192.168.56.23
$ ifconfig
eth0      Link encap:Ethernet  HWaddr FA:16:3E:54:44:41 
          inet addr:192.168.56.23  Bcast:192.168.56.255  Mask:255.255.255.0
          inet6 addr: fe80::f816:3eff:fe54:4441/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:3207 errors:0 dropped:0 overruns:0 frame:0
          TX packets:148 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:198632 (193.9 KiB)  TX bytes:14166 (13.8 KiB)
 
lo        Link encap:Local Loopback 
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
 
$ whoami
cirros
 
Default credentials for the CirrOS image:
user: cirros
password: cubswin:)

The instance was created successfully and can be logged into.
 
----------------------------------------------------------------------------------------------------
 
Get the instance's VNC console URL with a command:
 
[root@openstack1 ~]# nova get-vnc-console provider-instance novnc
+-------+------------------------------------------------------------------------------------+
| Type  | Url                                                                                |
+-------+------------------------------------------------------------------------------------+
| novnc | http://10.88.66.15:6080/vnc_auto.html?token=f79e53d8-1c2d-460d-b1fa-694ee0102f40 |
+-------+------------------------------------------------------------------------------------+
 
----------------------------------------------------------------------------------------------------
 
Open http://10.88.66.15:6080/vnc_auto.html?token=f79e53d8-1c2d-460d-b1fa-694ee0102f40 in a browser

to log in to the virtual machine's console.
 
Troubleshooting:
1. Accessing Horizon fails after installing the dashboard

Accessing http://10.88.66.15/dashboard returns 500: Internal Server Error.
 
Fix:
Edit /etc/httpd/conf.d/openstack-dashboard.conf and add the following line below WSGISocketPrefix run/wsgi:
WSGIApplicationGroup %{GLOBAL}
Save, then restart the httpd service.
 
2. The instance console hangs at: Booting from Hard Disk

Fix: the compute node must configure libvirt to use QEMU instead of KVM:
 
[root@openstack2 ~]# vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu
cpu_mode=none
Restart the related services:
[root@openstack2 ~]#systemctl restart libvirtd.service openstack-nova-compute.service
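
A quick way of my own to tell which virt_type a node can support: if hardware virtualization flags are visible inside the VM (nested virtualization enabled), kvm works; if the count is 0, fall back to qemu as above:

[root@openstack2 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo    # 0 means no hardware virt
[root@openstack2 ~]# lsmod | grep kvm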
 
3. Unable to log in to the dashboard

Browsing to http://controller/dashboard fails to log in and shows: "Something went wrong!

An unexpected error has occurred. Try refreshing the page. If that doesn't help, contact your local administrator."
# tail -n 40 /var/log/httpd/error_log
[Wed Mar 15 22:56:22.744149 2017] [:error] [pid 2733] Login successful for user "admin".
[Wed Mar 15 22:56:34.220718 2017] [:error] [pid 2733] Internal Server Error: /dashboard/auth/login/
[Wed Mar 15 22:56:34.220920 2017] [:error] [pid 2733] Traceback (most recent call last):
[Wed Mar 15 22:56:34.220935 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/core/handlers/base.py", line 132, in get_response
[Wed Mar 15 22:56:34.220943 2017] [:error] [pid 2733]     response = wrapped_callback(request, *callback_args, **callback_kwargs)
[Wed Mar 15 22:56:34.220951 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/debug.py", line 76, in sensitive_post_parameters_wrapper
[Wed Mar 15 22:56:34.220959 2017] [:error] [pid 2733]     return view(request, *args, **kwargs)
[Wed Mar 15 22:56:34.220966 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/utils/decorators.py", line 110, in _wrapped_view
[Wed Mar 15 22:56:34.220973 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.220981 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/cache.py", line 57, in _wrapped_view_func
[Wed Mar 15 22:56:34.221043 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221052 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/openstack_auth/views.py", line 103, in login
[Wed Mar 15 22:56:34.221059 2017] [:error] [pid 2733]     **kwargs)
[Wed Mar 15 22:56:34.221067 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/debug.py", line 76, in sensitive_post_parameters_wrapper
[Wed Mar 15 22:56:34.221075 2017] [:error] [pid 2733]     return view(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221082 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/utils/decorators.py", line 110, in _wrapped_view
[Wed Mar 15 22:56:34.221089 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221095 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/cache.py", line 57, in _wrapped_view_func
[Wed Mar 15 22:56:34.221102 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221109 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/auth/views.py", line 51, in login
[Wed Mar 15 22:56:34.221179 2017] [:error] [pid 2733]     auth_login(request, form.get_user())
[Wed Mar 15 22:56:34.221206 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/auth/__init__.py", line 110, in login
[Wed Mar 15 22:56:34.221214 2017] [:error] [pid 2733]     request.session.cycle_key()
[Wed Mar 15 22:56:34.221221 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/sessions/backends/base.py", line 285, in cycle_key
[Wed Mar 15 22:56:34.221228 2017] [:error] [pid 2733]     self.create()
[Wed Mar 15 22:56:34.221269 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/sessions/backends/cache.py", line 48, in create
[Wed Mar 15 22:56:34.221281 2017] [:error] [pid 2733]     "Unable to create a new session key. "
[Wed Mar 15 22:56:34.221288 2017] [:error] [pid 2733] RuntimeError: Unable to create a new session key. It is likely that the cache is unavailable.
 
 
Fix: change SESSION_ENGINE in the dashboard's local_settings and restart httpd and memcached; after refreshing, login works (a solution found on a foreign forum).
 
# vim /etc/openstack-dashboard/local_settings
#SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
# systemctl restart httpd memcached
# systemctl status httpd memcached
 
 
 
Extension: the most commonly used VLAN network model with the Linuxbridge driver
Building on the network configuration above, make the following changes.
Edit /etc/neutron/plugins/ml2/ml2_conf.ini:
[ml2_type_vlan]
...
network_vlan_ranges = default:1:4000,external:1:4000
[ml2]
...
tenant_network_types = vlan
 
 
[root@openstack1 ~]# grep -v '^#\|^$' /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types = vlan
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = external
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
network_vlan_ranges = default:1:4000,external:1:4000
[ml2_type_vxlan]
[securitygroup]
enable_ipset = true
 
 
[root@openstack1 ~]# grep -v '^#\|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
 
[root@openstack2 ~]# grep -v '^#\|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
 
Restart the networking services on the controller and compute nodes:
systemctl restart neutron-server neutron-metadata-agent neutron-linuxbridge-agent neutron-l3-agent neutron-dhcp-agent
[root@openstack2 ~]# systemctl restart neutron-linuxbridge-agent.service
 
Create a VLAN-based network:
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network default --provider-network-type vlan vlan99
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        |                                      |
| created_at                | 2019-02-19T06:58:07Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan99                               |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 149                                  |
| qos_policy_id             | None                                 |
| revision_number           | 3                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tags                      |                                      |
| updated_at                | 2019-02-19T06:58:07Z                 |
+---------------------------+--------------------------------------+
 
[root@openstack1 ~]# openstack subnet create --network vlan99 --allocation-pool start=192.168.99.101,end=192.168.99.250 --dns-nameserver 114.114.114.114 --gateway 192.168.99.1 --subnet-range 192.168.99.0/24 vlan99-sub
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.99.101-192.168.99.250        |
| cidr                    | 192.168.99.0/24                      |
| created_at              | 2019-02-19T07:01:34Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.99.1                         |
| host_routes             |                                      |
| id                      | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan99-sub                           |
| network_id              | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:01:34Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
 
[root@openstack1 ~]# openstack network list
+--------------------------------------+--------+--------------------------------------+
| ID                                   | Name   | Subnets                              |
+--------------------------------------+--------+--------------------------------------+
| 4c530c4c-6bb5-4927-9983-ea58aba82b42 | vlan99 | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
+--------------------------------------+--------+--------------------------------------+
 
 
[root@openstack1 ~]# openstack network show 4c530c4c-6bb5-4927-9983-ea58aba82b42
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        | nova                                 |
| created_at                | 2019-02-19T06:58:07Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan99                               |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 149                                  |
| qos_policy_id             | None                                 |
| revision_number           | 4                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
| tags                      |                                      |
| updated_at                | 2019-02-19T07:01:34Z                 |
+---------------------------+--------------------------------------+
 
 
[root@openstack1 ~]# openstack subnet list
+--------------------------------------+------------+--------------------------------------+-----------------+
| ID                                   | Name       | Network                              | Subnet          |
+--------------------------------------+------------+--------------------------------------+-----------------+
| c31248d4-31a9-4c54-b26b-23c2ba8be758 | vlan99-sub | 4c530c4c-6bb5-4927-9983-ea58aba82b42 | 192.168.99.0/24 |
+--------------------------------------+------------+--------------------------------------+-----------------+
 
 
[root@openstack1 ~]# openstack subnet show c31248d4-31a9-4c54-b26b-23c2ba8be758
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.99.101-192.168.99.250        |
| cidr                    | 192.168.99.0/24                      |
| created_at              | 2019-02-19T07:01:34Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.99.1                         |
| host_routes             |                                      |
| id                      | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan99-sub                           |
| network_id              | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:01:34Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
Hmm, this is not quite the VLAN 99 I need. The network is named vlan99, but provider:segmentation_id above is 149: because the network was created without --provider-segment, Neutron auto-allocated a free segmentation ID from its VLAN range, and the Linux bridge below therefore attaches the sub-interface eth1.149 instead of eth1.99.
[root@openstack1 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540019041e    no        eth1.149
                            tap715df6be-b8
[root@openstack2 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540030bc71    no        eth1.149
                            tap8e5772bb-66
[root@openstack2 ~]# virsh list
Id    Name                           State
----------------------------------------------------
3     instance-0000000a              running
 
[root@openstack2 ~]# virsh domiflist instance-0000000a
Interface  Type       Source     Model       MAC
-------------------------------------------------------
tap8e5772bb-66 bridge     brq4c530c4c-6b virtio      fa:16:3e:20:a8:72
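Before moving on, we can confirm the mismatch directly with the standard openstack CLI column filter (nothing below is specific to this deployment):

openstack network show vlan99 -c "provider:segmentation_id" -f value
# prints 149, not 99: without --provider-segment, Neutron picks a free ID
# from the [ml2_type_vlan] network_vlan_ranges pool instead of using 99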
 
 
Let's try creating a vlan100 network from the dashboard instead.
 
[root@openstack1 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540019041e    no        eth1.149
                            tap715df6be-b8
brq5e0a1e0d-17        8000.52540019041e    no        eth1.100
                            tape498ff08-59
The dashboard-created network looks right this time: the bridge attaches eth1.100, which matches the intended VLAN tag.
 
[root@openstack2 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540030bc71    no        eth1.149
                            tap8e5772bb-66
brq5e0a1e0d-17        8000.52540030bc71    no        eth1.100
                            tap2d0a57c6-cb
 
[root@openstack2 ~]# virsh list --all
Id    Name                           State
----------------------------------------------------
3     instance-0000000a              running
5     instance-0000000c              running
 
[root@openstack2 ~]# virsh domiflist instance-0000000c
Interface  Type       Source     Model       MAC
-------------------------------------------------------
tap2d0a57c6-cb bridge     brq5e0a1e0d-17 virtio      fa:16:3e:c8:14:8a
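If brctl is not installed on a node (it comes from the bridge-utils package), the same tap-to-bridge-to-VLAN mapping can be recovered with plain iproute2; a minimal sketch:

ip link show master brq5e0a1e0d-17     # list every port attached to the vlan100 bridge
ip -d link show eth1.100               # the detail output should show "vlan ... id 100"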
 
[root@openstack1 ~]# openstack network show 5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        | nova                                 |
| created_at                | 2019-02-19T07:21:35Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | 5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan100                              |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 100                                  |
| qos_policy_id             | None                                 |
| revision_number           | 4                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   | 2dd900a9-f1d0-42e1-9c2d-b4a81237e29f |
| tags                      |                                      |
| updated_at                | 2019-02-19T07:23:27Z                 |
+---------------------------+--------------------------------------+
 
[root@openstack1 ~]# openstack subnet show 2dd900a9-f1d0-42e1-9c2d-b4a81237e29f
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.100.101-192.168.100.210      |
| cidr                    | 192.168.100.0/24                     |
| created_at              | 2019-02-19T07:23:27Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.100.1                        |
| host_routes             |                                      |
| id                      | 2dd900a9-f1d0-42e1-9c2d-b4a81237e29f |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan-sub                             |
| network_id              | 5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:23:27Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
So how do we create a VLAN network from the command line? Take the commands used at the very beginning, tweak them slightly, and add the VLAN-related parameter --provider-segment. Let's try creating a vlan101:
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network default --provider-network-type vlan --provider-segment 101 vlan101
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        |                                      |
| created_at                | 2019-02-19T07:36:26Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | a0328240-60bc-4d18-8092-05a3882ee13d |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan101                              |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 101                                  |
| qos_policy_id             | None                                 |
| revision_number           | 3                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tags                      |                                      |
| updated_at                | 2019-02-19T07:36:26Z                 |
+---------------------------+--------------------------------------+
[root@openstack1 ~]# openstack subnet create --network vlan101 --allocation-pool start=192.168.101.101,end=192.168.101.250 --dns-nameserver 114.114.114.114 --gateway 192.168.101.1 --subnet-range 192.168.101.0/24 vlan101-sub
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.101.101-192.168.101.250      |
| cidr                    | 192.168.101.0/24                     |
| created_at              | 2019-02-19T07:43:15Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.101.1                        |
| host_routes             |                                      |
| id                      | 7152d7e2-fc83-4597-b9f5-af58aa3de9e2 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan101-sub                          |
| network_id              | a0328240-60bc-4d18-8092-05a3882ee13d |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:43:15Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
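With the network and subnet in place, a throwaway instance can be booted straight onto vlan101 to exercise DHCP. A minimal sketch, assuming an image named cirros and a flavor named m1.tiny already exist (both names are placeholders; substitute whatever your Glance and Nova actually have):

openstack server create --flavor m1.tiny --image cirros \
  --nic net-id=vlan101 --security-group default test-vlan101
openstack server list   # wait for ACTIVE; the instance should get a 192.168.101.x address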
 
[root@openstack1 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540019041e    no        eth1.149
                            tap715df6be-b8
brq5e0a1e0d-17        8000.52540019041e    no        eth1.100
                            tape498ff08-59
brqa0328240-60        8000.023e6f041beb    no        eth1.101
                            tap8fd757d3-8d
 
[root@openstack1 ~]# ip netns list
qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d (id: 2)
qdhcp-5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd (id: 1)
qdhcp-4c530c4c-6bb5-4927-9983-ea58aba82b42 (id: 0)
[root@openstack1 ~]# ip netns exec qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ns-8fd757d3-8d@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether fa:16:3e:97:1e:75 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.101.101/24 brd 192.168.101.255 scope global ns-8fd757d3-8d
       valid_lft forever preferred_lft forever
    inet 169.254.169.254/16 brd 169.254.255.255 scope global ns-8fd757d3-8d
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe97:1e75/64 scope link
       valid_lft forever preferred_lft forever
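The ns- interface above (192.168.101.101, the first address of the allocation pool) is where dnsmasq answers DHCP requests for vlan101. Two quick checks from inside the namespace; the instance address 192.168.101.102 below is hypothetical, use whatever openstack server list reports:

ip netns exec qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d ss -lnup | grep :67   # dnsmasq listening for DHCP
ip netns exec qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d ping -c 3 192.168.101.102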
 
 
To wrap up: if we swap the subnets created above for public address ranges, keep the switch ports facing each server's eth1 in trunk mode, and point the gateway at the real public gateway, then instances on this cloud can talk to the outside world directly, because the addresses the subnet hands out are themselves public.
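As an illustration only, the public case is the same two commands we used above. Here VLAN 809 and the 203.0.113.0/24 range are placeholders: substitute a VLAN ID actually trunked on the switch port facing eth1 and your real public prefix and gateway:

openstack network create --share --external \
  --provider-physical-network default --provider-network-type vlan \
  --provider-segment 809 pub-vlan809
openstack subnet create --network pub-vlan809 \
  --allocation-pool start=203.0.113.101,end=203.0.113.250 \
  --dns-nameserver 114.114.114.114 --gateway 203.0.113.1 \
  --subnet-range 203.0.113.0/24 pub-vlan809-sub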
 
 
Error summary
Error: __init__() got an unexpected keyword argument 'user_domain_name'
Fix
This is usually caused by wrong user credentials; try clearing the stale environment variables first.
Run:
unset OS_AUTH_URL OS_PASSWORD OS_TOKEN
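After the unset, re-source a clean credentials file. A typical Pike-era admin rc looks like the sketch below; ADMIN_PASS and the controller hostname are placeholders for the values from your own deployment:

cat > ~/admin-openrc <<'EOF'
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
source ~/admin-openrc
openstack token issue   # should now return a token instead of the __init__ error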