硬件需求
操作系统:
CentOS 7
内核版本:
[root@controller ~]# uname -m x86_64 [root@controller ~]# uname -r 3.10.0-693.21.1.el7.x86_64
节点间以及网卡配置
controller节点
[root@controller ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens6f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a0 brd ff:ff:ff:ff:ff:ff inet 10.71.11.12/24 brd 10.71.11.255 scope global ens6f0 valid_lft forever preferred_lft forever inet6 fe80::ffc8:8166:c284:eaa3/64 scope link valid_lft forever preferred_lft forever 3: ens6f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a1 brd ff:ff:ff:ff:ff:ff 4: ens6f2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a2 brd ff:ff:ff:ff:ff:ff 5: ens6f3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether ac:85:3d:bd:73:a3 brd ff:ff:ff:ff:ff:ff 6: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN qlen 1000 link/ether 6e:28:d0:af:fe:b3 brd ff:ff:ff:ff:ff:ff
compute节点
[root@compute ~]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens6f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e0 brd ff:ff:ff:ff:ff:ff inet 10.71.11.13/24 brd 10.71.11.255 scope global ens6f0 valid_lft forever preferred_lft forever inet6 fe80::4e66:a096:a692:765d/64 scope link valid_lft forever preferred_lft forever 3: ens6f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e1 brd ff:ff:ff:ff:ff:ff 4: ens6f2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e2 brd ff:ff:ff:ff:ff:ff 5: ens6f3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:b4:0f:e3 brd ff:ff:ff:ff:ff:ff 6: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN qlen 1000 link/ether 0e:1d:f6:3d:f3:40 brd ff:ff:ff:ff:ff:ff
存储Cinder节点
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ens6f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:29 brd ff:ff:ff:ff:ff:ff inet 10.71.11.14/24 brd 10.71.11.255 scope global ens6f0 valid_lft forever preferred_lft forever inet6 fe80::b358:ad47:b704:c86/64 scope link valid_lft forever preferred_lft forever 3: ens6f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:2a brd ff:ff:ff:ff:ff:ff 4: ens6f2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:2b brd ff:ff:ff:ff:ff:ff 5: ens6f3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000 link/ether 30:d1:7e:af:ae:2c brd ff:ff:ff:ff:ff:ff 6: bond0: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN qlen 1000 link/ether 86:2e:1d:fc:9c:c7 brd ff:ff:ff:ff:ff:ff
说明:本次部署采用三台物理节点手动搭建社区OpenStack Queens环境
OpenStack项目是一个开源云计算平台,支持所有类型的云环境。该项目旨在实现简单、大规模的可扩展性和丰富的功能。
OpenStack通过各种补充服务提供基础架构即服务(IaaS)解决方案。每项服务都提供了一个应用程序编程接口(API),以促进这种集成。
本文面向具备足够Linux经验的OpenStack新用户,通过功能性示例体系结构逐步部署主要OpenStack服务,仅用于学习OpenStack的最小化环境。
1.概念性架构
下图显示了OpenStack服务之间的关系:
2.逻辑体系结构
下图显示了OpenStack云中最常见但不是唯一可能的体系结构:
对于设计,部署和配置OpenStack,学习者必须了解逻辑体系结构。
如概念架构所示,OpenStack由几个独立的部分组成,称为OpenStack服务。所有服务都通过keystone服务进行身份验证。
各个服务通过公共API相互交互,除非需要特权管理员命令。
在内部,OpenStack服务由多个进程组成。所有服务都至少有一个API进程,它监听API请求,预处理它们并将它们传递给服务的其他部分。除身份服务外,实际工作由不同的进程完成。
对于一个服务的进程之间的通信,使用AMQP消息代理。该服务的状态存储在数据库中。部署和配置OpenStack云时,您可以选择多种消息代理和数据库解决方案,例如RabbitMQ、MySQL、MariaDB和SQLite。
用户可以通过Horizon Dashboard实现的基于Web的用户界面、通过命令行客户端以及通过浏览器插件或curl等工具发布API请求来访问OpenStack。对于应用程序,有几个SDK可用。最终,所有这些访问方法都会对各种OpenStack服务发出REST API调用。
1.配置节点网卡IP(略)
2.设置主机名
hostnamectl set-hostname 主机名 bash ##使设置立即生效
3.配置域名解析,编辑/etc/hosts文件,加入以下配置
10.71.11.12 controller 10.71.11.13 compute 10.71.11.14 cinder
4.验证网络连通性
在控制节点执行
[root@controller ~]# ping -c 4 openstack.org PING openstack.org (162.242.140.107) 56(84) bytes of data. 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=1 ttl=46 time=248 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=2 ttl=46 time=248 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=3 ttl=46 time=248 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=4 ttl=46 time=248 ms [root@controller ~]# ping -c 4 compute PING compute (10.71.11.13) 56(84) bytes of data. 64 bytes from compute (10.71.11.13): icmp_seq=1 ttl=64 time=0.395 ms 64 bytes from compute (10.71.11.13): icmp_seq=2 ttl=64 time=0.214 ms
在计算节点执行
[root@compute ~]# ping -c 4 openstack.org PING openstack.org (162.242.140.107) 56(84) bytes of data. 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=1 ttl=46 time=249 ms 64 bytes from 162.242.140.107 (162.242.140.107): icmp_seq=2 ttl=46 time=248 ms [root@compute ~]# ping -c 4 controller PING controller (10.71.11.12) 56(84) bytes of data. 64 bytes from controller (10.71.11.12): icmp_seq=1 ttl=64 time=0.237 ms
5.配置阿里yum源
备份
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
下载
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
或者
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
6.安装NTP时钟服务(全部节点)
##controller节点##
安装软件包
yum install chrony -y
编辑/etc/chrony.conf文件,配置时钟源同步服务端
server controller iburst ##所有节点向controller节点同步时间 allow 10.71.11.0/24 ##设置时间同步网段
设置NTP服务开机启动
systemctl enable chronyd.service systemctl start chronyd.service
其余节点
安装软件包
yum install chrony -y
配置所有节点指向controller同步时间
vi /etc/chrony.conf server controller iburst
重启NTP服务(略)
验证时钟同步服务
在controller节点执行
[root@controller ~]# chronyc sources 210 Number of sources = 4 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* time4.aliyun.com 2 10 377 1015 +115us[ +142us] +/- 14ms ^- ntp8.flashdance.cx 2 10 347 428 +27ms[ +27ms] +/- 259ms ^- 85.199.214.101 1 10 377 988 +38ms[ +38ms] +/- 202ms ^- ntp7.flashdance.cx 2 10 367 836 +35ms[ +35ms] +/- 247ms MS列中的内容应该指明* NTP服务当前同步的服务器。
在其余节点执行
[root@compute ~]# chronyc sources 210 Number of sources = 4 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* leontp.ccgs.wa.edu.au 1 10 377 752 +49ms[ +49ms] +/- 121ms ^+ ntp5.flashdance.cx 2 10 373 1155 +15ms[ +16ms] +/- 258ms ^+ 85.199.214.101 1 10 377 46m -22ms[ -21ms] +/- 164ms ^+ ntp8.flashdance.cx 2 10 333 900 -6333us[-5976us] +/- 257ms [root@cinder ~]# chronyc sources 210 Number of sources = 4 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^+ 61-216-153-104.HINET-IP.> 3 10 377 748 -3373us[-3621us] +/- 87ms ^- 85.199.214.100 1 10 377 876 +37ms[ +36ms] +/- 191ms ^* 61-216-153-106.HINET-IP.> 3 10 377 869 +774us[ +527us] +/- 95ms ^- makaki.miuku.net 2 10 377 384 +30ms[ +30ms] +/- 254ms
注意:日常运维中经常碰见时钟漂移问题,导致集群服务脑裂
说明:如无特殊说明,以下操作在所有节点上执行
1.下载安装openstack软件仓库(queens版本)
yum install centos-release-openstack-queens -y
2.更新全部节点软件包
yum upgrade
3.安装openstack client端
yum install python-openstackclient -y
4.安装openstack-selinux
yum install openstack-selinux -y
大多数OpenStack服务使用SQL数据库来存储信息,数据库通常在控制器节点上运行。本文主要使用MariaDB或MySQL。
安装软件包
yum install mariadb mariadb-server python2-PyMySQL -y
编辑/etc/my.cnf.d/mariadb-server.cnf并完成以下操作
[root@controller ~]# vi /etc/my.cnf.d/mariadb-server.cnf # # These groups are read by MariaDB server. # Use it for options that only the server (but not clients) should see # # See the examples of server my.cnf files in /usr/share/mysql/ # # this is read by the standalone daemon and embedded servers [server] # this is only for the mysqld standalone daemon # Settings user and group are ignored when systemd is used. # If you need to run mysqld under a different user or group, # customize your systemd unit file for mysqld/mariadb according to the # instructions in http://fedoraproject.org/wiki/Systemd [mysqld] datadir=/var/lib/mysql socket=/var/lib/mysql/mysql.sock log-error=/var/log/mariadb/mariadb.log pid-file=/var/run/mariadb/mariadb.pid bind-address = 10.71.11.12 default-storage-engine = innodb innodb_file_per_table = on max_connections = 4096 collation-server = utf8_general_ci character-set-server = utf8
说明:bind-address使用controller节点的管理IP
设置服务开机启动
systemctl enable mariadb.service systemctl start mariadb.service
通过运行mysql_secure_installation脚本来保护数据库服务。
[root@controller ~]# mysql_secure_installation NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB SERVERS IN PRODUCTION USE! PLEASE READ EACH STEP CAREFULLY! In order to log into MariaDB to secure it, we'll need the current password for the root user. If you've just installed MariaDB, and you haven't set the root password yet, the password will be blank, so you should just press enter here. Enter current password for root (enter for none): OK, successfully used password, moving on... Setting the root password ensures that nobody can log into the MariaDB root user without the proper authorisation. Set root password? [Y/n] New password: Re-enter new password: Password updated successfully! Reloading privilege tables.. ... Success! By default, a MariaDB installation has an anonymous user, allowing anyone to log into MariaDB without having to have a user account created for them. This is intended only for testing, and to make the installation go a bit smoother. You should remove them before moving into a production environment. Remove anonymous users? [Y/n] ... Success! Normally, root should only be allowed to connect from 'localhost'. This ensures that someone cannot guess at the root password from the network. Disallow root login remotely? [Y/n] ... Success! By default, MariaDB comes with a database named 'test' that anyone can access. This is also intended only for testing, and should be removed before moving into a production environment. Remove test database and access to it? [Y/n] - Dropping test database... ... Success! - Removing privileges on test database... ... Success! Reloading the privilege tables will ensure that all changes made so far will take effect immediately. Reload privilege tables now? [Y/n] ... Success! Cleaning up... All done! If you've completed all of the above steps, your MariaDB installation should now be secure. Thanks for using MariaDB!
1.安装配置消息队列组件
yum install rabbitmq-server -y
2.设置服务开机启动
systemctl enable rabbitmq-server.service;systemctl start rabbitmq-server.service
3.添加openstack 用户
rabbitmqctl add_user openstack 123456
4.openstack用户的权限配置
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
说明:身份认证服务使用Memcached缓存令牌。memcached服务通常在控制器节点上运行。对于生产部署,我们建议启用防火墙、身份验证和加密的组合来保护它。
1.安装配置组件
yum install memcached python-memcached -y
2.编辑/etc/sysconfig/memcached
vi /etc/sysconfig/memcached OPTIONS="-l 10.71.11.12,::1,controller"
3.设置服务开机启动
systemctl enable memcached.service;systemctl start memcached.service
1.安装服务
yum install etcd -y
2.编辑/etc/etcd/etcd.conf文件
vi /etc/etcd/etcd.conf ETCD_INITIAL_CLUSTER ETCD_INITIAL_ADVERTISE_PEER_URLS ETCD_ADVERTISE_CLIENT_URLS ETCD_LISTEN_CLIENT_URLS #[Member] ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="http://10.71.11.12:2380" ETCD_LISTEN_CLIENT_URLS="http://10.71.11.12:2379" ETCD_NAME="controller" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.71.11.12:2380" ETCD_ADVERTISE_CLIENT_URLS="http://10.71.11.12:2379" ETCD_INITIAL_CLUSTER="controller=http://10.71.11.12:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01" ETCD_INITIAL_CLUSTER_STATE="new"
3.设置服务开机启动
systemctl enable etcd;systemctl start etcd
1.创建keystone数据库并授权
mysql -u root -p CREATE DATABASE keystone; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
2.安装、配置组件
yum install openstack-keystone httpd mod_wsgi -y
3.编辑 /etc/keystone/keystone.conf
[database] connection = mysql+pymysql://keystone:123456@controller/keystone [token] provider = fernet
4.同步keystone数据库
su -s /bin/sh -c "keystone-manage db_sync" keystone
5.数据库初始化
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
6.引导身份认证服务
keystone-manage bootstrap --bootstrap-password 123456 --bootstrap-admin-url http://controller:35357/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
1.编辑/etc/httpd/conf/httpd.conf,配置ServerName参数
ServerName controller
2.创建 /usr/share/keystone/wsgi-keystone.conf 链接文件
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
3.设置服务开机启动
systemctl enable httpd.service;systemctl start httpd.service
启动服务报错
[root@controller ~]# systemctl start httpd.service Job for httpd.service failed because the control process exited with error code. See "systemctl status httpd.service" and "journalctl -xe" for details. [root@controller ~]# journalctl -xe Apr 01 02:31:03 controller systemd[1]: [/usr/lib/systemd/system/memcached.service:62] Unknown lvalue 'ProtectControlGroups' in section 'Service' Apr 01 02:31:03 controller systemd[1]: [/usr/lib/systemd/system/memcached.service:65] Unknown lvalue 'RestrictRealtime' in section 'Service' Apr 01 02:31:03 controller systemd[1]: [/usr/lib/systemd/system/memcached.service:72] Unknown lvalue 'RestrictNamespaces' in section 'Service' Apr 01 02:31:03 controller polkitd[928]: Unregistered Authentication Agent for unix-process:18932:9281785 (system bus name :1.157, object path /org/freedeskt Apr 01 02:31:09 controller polkitd[928]: Registered Authentication Agent for unix-process:18952:9282349 (system bus name :1.158 [/usr/bin/pkttyagent --notify Apr 01 02:31:09 controller systemd[1]: Starting The Apache HTTP Server... -- Subject: Unit httpd.service has begun start-up -- Defined-By: systemd -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel -- -- Unit httpd.service has begun starting up. 
Apr 01 02:31:09 controller httpd[18958]: (13)Permission denied: AH00072: make_sock: could not bind to address [::]:5000 Apr 01 02:31:09 controller httpd[18958]: (13)Permission denied: AH00072: make_sock: could not bind to address 0.0.0.0:5000 Apr 01 02:31:09 controller httpd[18958]: no listening sockets available, shutting down Apr 01 02:31:09 controller httpd[18958]: AH00015: Unable to open logs Apr 01 02:31:09 controller systemd[1]: httpd.service: main process exited, code=exited, status=1/FAILURE Apr 01 02:31:09 controller kill[18960]: kill: cannot find process "" Apr 01 02:31:09 controller systemd[1]: httpd.service: control process exited, code=exited status=1 Apr 01 02:31:09 controller systemd[1]: Failed to start The Apache HTTP Server. -- Subject: Unit httpd.service has failed -- Defined-By: systemd -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel -- -- Unit httpd.service has failed. -- -- The result is failed. Apr 01 02:31:09 controller systemd[1]: Unit httpd.service entered failed state. Apr 01 02:31:09 controller systemd[1]: httpd.service failed. Apr 01 02:31:09 controller polkitd[928]: Unregistered Authentication Agent for unix-process:18952:9282349 (system bus name :1.158, object path /org/freedeskt
通过判断,是SELinux引起的问题
解决办法:关闭SELinux(修改配置文件后需重启系统生效,或先执行 setenforce 0 临时生效)
[root@controller ~]# vi /etc/selinux/config # This file controls the state of SELinux on the system. # SELINUX= can take one of these three values: # enforcing - SELinux security policy is enforced. # permissive - SELinux prints warnings instead of enforcing. # disabled - No SELinux policy is loaded. SELINUX=disabled # SELINUXTYPE= can take one of three two values: # targeted - Targeted processes are protected, # minimum - Modification of targeted policy. Only selected processes are protected. # mls - Multi Level Security protection. SELINUXTYPE=targeted
再次重启服务报错解决
[root@controller ~]# systemctl enable httpd.service;systemctl start httpd.service
4.配置administrative帐号
export OS_USERNAME=admin export OS_PASSWORD=123456 export OS_PROJECT_NAME=admin export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_DOMAIN_NAME=Default export OS_AUTH_URL=http://controller:35357/v3 export OS_IDENTITY_API_VERSION=3
1.创建域
openstack domain create --description "Domain" example [root@controller ~]# openstack domain create --description "Domain" example +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Domain | | enabled | True | | id | 199658b1d0234c3cb8785c944aa05780 | | name | example | | tags | [] | +-------------+----------------------------------+
2.创建service项目
openstack project create --domain default --description "Service Project" service [root@controller ~]# openstack project create --domain default --description "Service Project" service +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Service Project | | domain_id | default | | enabled | True | | id | 03e700ff43e44b29b97365bac6c7d723 | | is_domain | False | | name | service | | parent_id | default | | tags | [] | +-------------+----------------------------------+
3.创建平台demo项目
openstack project create --domain default --description "Demo Project" demo [root@controller ~]# openstack project create --domain default --description "Demo Project" demo +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Demo Project | | domain_id | default | | enabled | True | | id | 61f8c9005ca84477b5bdbf485be1a546 | | is_domain | False | | name | demo | | parent_id | default | | tags | [] | +-------------+----------------------------------+
4.创建demo用户
openstack user create --domain default --password-prompt demo [root@controller ~]# openstack user create --domain default --password-prompt demo User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | fa794c034a53472c827a94e6a6ad12c1 | | name | demo | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
5.创建用户角色
openstack role create user [root@controller ~]# openstack role create user +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | None | | id | 15ea413279a74770b79630b75932a596 | | name | user | +-----------+----------------------------------+
6.添加用户角色到demo项目和用户
openstack role add --project demo --user demo user
说明:此条命令执行成功后不返回参数
1.取消环境变量
unset OS_AUTH_URL OS_PASSWORD
2.admin用户返回的认证token
[root@controller ~]# unset OS_AUTH_URL OS_PASSWORD [root@controller ~]# openstack --os-auth-url http://controller:35357/v3 \ > --os-project-domain-name Default --os-user-domain-name Default \ > --os-project-name admin --os-username admin token issue Password: +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2018-04-01T07:45:18+0000 | | id | gAAAAABawH_-ke3POs9LLzpEEH3Wziuk6VlQmNZCtxlDovLaSmg_-dOOUSDWsF-gw9we4QvcHzdO5Ahc3eEdDl6sIztZ60QQTG3x5Kbt_75EbWCZsBa2HkybZ-nJYuN4o3tQugse2BDcs8HF7bT1pAtoW0UM29RQNlCMdvx9jfcIT4EBit1SMKM | | project_id | 4205b649750d4ea68ff5bea73de0faae | | user_id | 475b31138acc4cc5bb42ca64af418963 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
3.demo用户返回的认证token
[root@controller ~]# openstack --os-auth-url http://controller:5000/v3 \ > --os-project-domain-name Default --os-user-domain-name Default \ > --os-project-name demo --os-username demo token issue Password: +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2018-04-01T07:45:58+0000 | | id | gAAAAABawIAmwGuiyDMjhqTmkwgDi0hKyj55WCDaMdPvyr4H8ZJbBNt7cUTtQ2AEHdP8Z_PRB4RI0uiJIvtOoMI0DUmMrKsmZU5G95tKY4y-kXPvvqdd8_JdUvQN4MgCStb-ZZ3OpNwN6500C891M8DTA6W1pWR8julBNaFrEQdlllhreOfdLc4 | | project_id | 61f8c9005ca84477b5bdbf485be1a546 | | user_id | fa794c034a53472c827a94e6a6ad12c1 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
1.创建admin-openrc脚本
export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=123456 export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2
2.创建demo-openrc脚本
export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=demo export OS_USERNAME=demo export OS_PASSWORD=123456 export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2
3.使用脚本,返回认证token
[root@controller ~]# openstack token issue +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | expires | 2018-04-01T08:17:29+0000 | | id | gAAAAABawIeJ0z-3R2ltY6ublCGqZX80AIi4tQUxqEpw0xvPsFP9BLV8ALNsB2B7bsVivGB14KvhUncdoRl_G2ng5BtzVKAfzHyB-OxwiXeqAttkpQsuLCDKRHd3l-K6wRdaDqfNm-D1QjhtFoxHOTotOcjtujBHF12uP49TjJtl1Rrd6uVDk0g | | project_id | 4205b649750d4ea68ff5bea73de0faae | | user_id | 475b31138acc4cc5bb42ca64af418963 | +------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
1.创建glance数据库,并授权
mysql -u root -p CREATE DATABASE glance; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
2.获取admin用户的环境变量,并创建服务认证
. admin-openrc
创建glance用户
[root@controller ~]# openstack user create --domain default --password-prompt glance User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | dd2363d365624c998dfd788b13e1282b | | name | glance | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
把admin角色添加到glance用户和service项目中
openstack role add --project service --user glance admin
说明:此条命令执行成功后不返回参数
创建glance服务
[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Image | | enabled | True | | id | 5927e22c745449869ff75b193ed7d7c6 | | name | glance | | type | image | +-------------+----------------------------------+
3.创建镜像服务API端点
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0822449bf80f4f6897be5e3240b6bfcc | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 5927e22c745449869ff75b193ed7d7c6 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | f18ae583441b4d118526571cdc204d8a | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 5927e22c745449869ff75b193ed7d7c6 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 79eadf7829274b1b9beb2bfb6be91992 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 5927e22c745449869ff75b193ed7d7c6 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+
1.安装软件包
yum install openstack-glance -y
2.编辑/etc/glance/glance-api.conf文件
[database] connection = mysql+pymysql://glance:123456@controller/glance [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = glance password = 123456 [paste_deploy] flavor = keystone [glance_store] stores = file,http default_store = file filesystem_store_datadir = /var/lib/glance/images/
3.编辑/etc/glance/glance-registry.conf
[database] connection = mysql+pymysql://glance:123456@controller/glance [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = glance password = 123456 [paste_deploy] flavor = keystone
4.同步镜像服务数据库
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service systemctl start openstack-glance-api.service openstack-glance-registry.service
使用CirrOS验证Image服务的操作,这是一个小型Linux映像,可帮助您测试OpenStack部署。
有关如何下载和构建映像的更多信息,请参阅OpenStack虚拟机映像指南https://docs.openstack.org/image-guide/
有关如何管理映像的信息,请参阅OpenStack最终用户指南https://docs.openstack.org/queens/user/
1.获取admin用户的环境变量,并下载镜像
. admin-openrc wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
2.上传镜像
使用QCOW2磁盘格式、bare容器格式和公开可见性将镜像上传到Image服务,以便所有项目都可以访问它:
[root@controller ~]# openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | f8ab98ff5e73ebab884d80c9dc9c7290 | | container_format | bare | | created_at | 2018-04-01T08:00:05Z | | disk_format | qcow2 | | file | /v2/images/916faa2b-e292-46e0-bfe4-0f535069a1a0/file | | id | 916faa2b-e292-46e0-bfe4-0f535069a1a0 | | min_disk | 0 | | min_ram | 0 | | name | cirros | | owner | 4205b649750d4ea68ff5bea73de0faae | | protected | False | | schema | /v2/schemas/image | | size | 13267968 | | status | active | | tags | | | updated_at | 2018-04-01T08:00:06Z | | virtual_size | None | | visibility | public | +------------------+------------------------------------------------------+
3.查看上传的镜像
[root@controller ~]# openstack image list +--------------------------------------+--------+--------+ | ID | Name | Status | +--------------------------------------+--------+--------+ | 916faa2b-e292-46e0-bfe4-0f535069a1a0 | cirros | active | +--------------------------------------+--------+--------+
说明:glance具体配置选项:https://docs.openstack.org/glance/queens/configuration/index.html
1.创建nova_api、nova、nova_cell0数据库
mysql -u root -p CREATE DATABASE nova_api; CREATE DATABASE nova; CREATE DATABASE nova_cell0;
数据库登录授权
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '123456';
2.创建nova用户
[root@controller ~]# . admin-openrc [root@controller ~]# openstack user create --domain default --password-prompt nova User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 8e72103f5cc645669870a630ffb25065 | | name | nova | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
3.为nova用户添加admin角色
openstack role add --project service --user nova admin
4.创建nova服务端点
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Compute | | enabled | True | | id | 9f8f8d8cb8e542b09694bee6016cc67c | | name | nova | | type | compute | +-------------+----------------------------------+
5.创建compute API服务端点
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | cf260d5a56344c728840e2696f44f9bc | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 9f8f8d8cb8e542b09694bee6016cc67c | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | f308f29a78e04b888c7418e78c3d6a6d | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 9f8f8d8cb8e542b09694bee6016cc67c | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 022d96fa78de4b73b6212c09f13d05be | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 9f8f8d8cb8e542b09694bee6016cc67c | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+----------------------------------+
建立一个placement服务用户
[root@controller ~]# openstack user create --domain default --password-prompt placement User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | fa239565fef14492ba18a649deaa6f3c | | name | placement | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+
6.添加placement用户为项目服务admin角色
openstack role add --project service --user placement admin
7.在服务目录创建Placement API服务
[root@controller ~]# openstack service create --name placement --description "Placement API" placement +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Placement API | | enabled | True | | id | 32bb1968c08747ccb14f6e4a20cd509e | | name | placement | | type | placement | +-------------+----------------------------------+
8.建立Placement API服务端点
[root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | b856962188484f4ba6fad500b26b00ee | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 32bb1968c08747ccb14f6e4a20cd509e | | service_name | placement | | service_type | placement | | url | http://controller:8778 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 62e5a3d82a994f048a8bb8ddd1adc959 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 32bb1968c08747ccb14f6e4a20cd509e | | service_name | placement | | service_type | placement | | url | http://controller:8778 | +--------------+----------------------------------+ [root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | f12f81ff7b72416aa5d035b8b8cc2605 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 32bb1968c08747ccb14f6e4a20cd509e | | service_name | placement | | service_type | placement | | url | http://controller:8778 | +--------------+----------------------------------+
1.安装软件包
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
2.编辑 /etc/nova/nova.conf
[DEFAULT] enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:123456@controller my_ip = 10.71.11.12 use_neutron = True firewall_driver = nova.virt.firewall.NoopFirewallDriver [api_database] connection = mysql+pymysql://nova:123456@controller/nova_api [database] connection = mysql+pymysql://nova:123456@controller/nova [api] auth_strategy = keystone [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = nova password = 123456 [vnc] enabled = true server_listen = $my_ip server_proxyclient_address = $my_ip [glance] api_servers = http://controller:9292 [oslo_concurrency] lock_path = /var/lib/nova/tmp [placement] os_region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:35357/v3 username = placement password = 123456
3.因为软件包的一个bug,需要在/etc/httpd/conf.d/00-nova-placement-api.conf文件中添加以下配置
<Directory /usr/bin> <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory>
4.重新启动http服务
systemctl restart httpd
5.同步nova-api数据库
su -s /bin/sh -c "nova-manage api_db sync" nova
同步数据库报错
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova Traceback (most recent call last): File "/usr/bin/nova-manage", line 10, in <module> sys.exit(main()) File "/usr/lib/python2.7/site-packages/nova/cmd/manage.py", line 1597, in main config.parse_args(sys.argv) File "/usr/lib/python2.7/site-packages/nova/config.py", line 52, in parse_args default_config_files=default_config_files) File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 2502, in __call__ else sys.argv[1:]) File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 3166, in _parse_cli_opts return self._parse_config_files() File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 3183, in _parse_config_files ConfigParser._parse_file(config_file, namespace) File "/usr/lib/python2.7/site-packages/oslo_config/cfg.py", line 1950, in _parse_file raise ConfigFileParseError(pe.filename, str(pe)) oslo_config.cfg.ConfigFileParseError: Failed to parse /etc/nova/nova.conf: at /etc/nova/nova.conf:8, No ':' or '=' found in assignment: '/etc/nova/nova.conf'
根据报错,把/etc/nova/nova.conf中第八行注释掉,解决报错
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning
6.注册cell0数据库
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova [root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning
7.建立cell1 cell
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning 6c689e8c-3e13-4e6d-974c-c2e4e22e510b
8.同步nova数据库
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning /usr/lib/python2.7/site-packages/pymysql/cursors.py:165: Warning: (1831, u'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.') result = self._query(query) /usr/lib/python2.7/site-packages/pymysql/cursors.py:165: Warning: (1831, u'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.') result = self._query(query)
9.验证 nova、 cell0、 cell1数据库是否注册正确
[root@controller ~]# nova-manage cell_v2 list_cells /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning +-------+--------------------------------------+------------------------------------+-------------------------------------------------+ | Name | UUID | Transport URL | Database Connection | +-------+--------------------------------------+------------------------------------+-------------------------------------------------+ | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | | cell1 | 6c689e8c-3e13-4e6d-974c-c2e4e22e510b | rabbit://openstack:****@controller | mysql+pymysql://nova:****@controller/nova | +-------+--------------------------------------+------------------------------------+-------------------------------------------------+
10.设置服务为开机启动
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
1.安装软件包
yum install openstack-nova-compute
2.编辑/etc/nova/nova.conf
[DEFAULT] enabled_apis = osapi_compute,metadata transport_url = rabbit://openstack:123456@controller my_ip = 10.71.11.13 use_neutron = True firewall_driver = nova.virt.firewall.NoopFirewallDriver [api] auth_strategy = keystone [keystone_authtoken] auth_uri = http://10.71.11.12:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = nova password = 123456 [vnc] enabled = True server_listen = 0.0.0.0 server_proxyclient_address = $my_ip novncproxy_base_url = http://controller:6080/vnc_auto.html [glance] api_servers = http://controller:9292 [oslo_concurrency] lock_path = /var/lib/nova/tmp [placement] os_region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:35357/v3 username = placement password = 123456
3.设置服务开机启动
systemctl enable libvirtd.service openstack-nova-compute.service systemctl start libvirtd.service openstack-nova-compute.service
说明:如果nova-compute服务无法启动,请检查/var/log/nova/nova-compute.log,会出现以下报错信息
2018-04-01 12:03:43.362 18612 INFO os_vif [-] Loaded VIF plugins: ovs, linux_bridge 2018-04-01 12:03:43.431 18612 WARNING oslo_config.cfg [-] Option "use_neutron" from group "DEFAULT" is deprecated for removal ( nova-network is deprecated, as are any related configuration options. ). Its value may be silently ignored in the future. 2018-04-01 12:03:43.609 18612 INFO nova.virt.driver [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] Loading compute driver 'libvirt.LibvirtDriver' 2018-04-01 12:03:43.825 18612 WARNING oslo_config.cfg [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] Option "firewall_driver" from group "DEFAULT" is deprecated for removal ( nova-network is deprecated, as are any related configuration options. ). Its value may be silently ignored in the future. 2018-04-01 12:03:43.832 18612 WARNING os_brick.initiator.connectors.remotefs [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] Connection details not present. RemoteFsClient may not initialize properly. 2018-04-01 12:03:43.938 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 1 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:45.042 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 2 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:47.140 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 4 seconds. 
Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:51.244 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 6 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:03:57.351 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 8 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH 2018-04-01 12:04:05.458 18612 ERROR oslo.messaging._drivers.impl_rabbit [req-8f3c2d77-ea29-49ca-933b-bfd4179552dc - - - - -] [683db769-0ab2-4e92-b19e-d2b711c8fadf] AMQP server on controller:5672 is unreachable: [Errno 113] EHOSTUNREACH. Trying again in 10 seconds. Client port: None: error: [Errno 113] EHOSTUNREACH @ "/var/log/nova/nova-compute.log" 947L, 240212C
控制器:5672上的错误消息AMQP服务器无法访问可能表示控制器节点上的防火墙阻止了对端口5672的访问。配置防火墙以在控制器节点上打开端口5672,并在计算节点上重新启动nova-compute服务。
清除controller的防火墙
[root@controller ~]# iptables -F [root@controller ~]# iptables -X [root@controller ~]# iptables -Z
重启计算服务成功
4.添加compute节点到cell数据库(controller)
验证有几个计算节点在数据库中
[root@controller ~]# . admin-openrc [root@controller ~]# openstack compute service list --service nova-compute +----+--------------+---------+------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+--------------+---------+------+---------+-------+----------------------------+ | 8 | nova-compute | compute | nova | enabled | up | 2018-04-01T22:24:14.000000 | +----+--------------+---------+------+---------+-------+----------------------------+
5.发现计算节点
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning Found 2 cell mappings. Skipping cell0 since it does not contain hosts. Getting compute nodes from cell 'cell1': 6c689e8c-3e13-4e6d-974c-c2e4e22e510b Found 1 unmapped computes in cell: 6c689e8c-3e13-4e6d-974c-c2e4e22e510b Checking host mapping for compute host 'compute': 32861a0d-894e-4af9-a57c-27662d27e6bd Creating host mapping for compute host 'compute': 32861a0d-894e-4af9-a57c-27662d27e6b
1.列出服务组件
[root@controller ~]#. admin-openrc [root@controller ~]# openstack compute service list +----+------------------+----------------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+------------------+----------------+----------+---------+-------+----------------------------+ | 1 | nova-consoleauth | controller | internal | enabled | up | 2018-04-01T22:25:29.000000 | | 2 | nova-conductor | controller | internal | enabled | up | 2018-04-01T22:25:33.000000 | | 3 | nova-scheduler | controller | internal | enabled | up | 2018-04-01T22:25:30.000000 | | 6 | nova-conductor | ansible-server | internal | enabled | up | 2018-04-01T22:25:55.000000 | | 7 | nova-scheduler | ansible-server | internal | enabled | up | 2018-04-01T22:25:59.000000 | | 8 | nova-compute | compute | nova | enabled | up | 2018-04-01T22:25:34.000000 | | 9 | nova-consoleauth | ansible-server | internal | enabled | up | 2018-04-01T22:25:57.000000 | +----+------------------+----------------+----------+---------+-------+----------------------------+
2.列出身份服务中的API端点以验证与身份服务的链接:
[root@controller ~]# openstack catalog list +-----------+-----------+-----------------------------------------+ | Name | Type | Endpoints | +-----------+-----------+-----------------------------------------+ | placement | placement | RegionOne | | | | internal: http://controller:8778 | | | | RegionOne | | | | public: http://controller:8778 | | | | RegionOne | | | | admin: http://controller:8778 | | | | | | keystone | identity | RegionOne | | | | public: http://controller:5000/v3/ | | | | RegionOne | | | | admin: http://controller:35357/v3/ | | | | RegionOne | | | | internal: http://controller:5000/v3/ | | | | | | glance | image | RegionOne | | | | public: http://controller:9292 | | | | RegionOne | | | | admin: http://controller:9292 | | | | RegionOne | | | | internal: http://controller:9292 | | | | | | nova | compute | RegionOne | | | | admin: http://controller:8774/v2.1 | | | | RegionOne | | | | public: http://controller:8774/v2.1 | | | | RegionOne | | | | internal: http://controller:8774/v2.1 | | | | | +-----------+-----------+-----------------------------------------+
3.列出镜像
[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| 916faa2b-e292-46e0-bfe4-0f535069a1a0 | cirros | active |
+--------------------------------------+--------+--------+
4.检查cells和placement API是否正常
[root@controller ~]# nova-status upgrade check /usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported exception.NotSupportedWarning Option "os_region_name" from group "placement" is deprecated. Use option "region-name" from group "placement". +---------------------------+ | Upgrade Check Results | +---------------------------+ | Check: Cells v2 | | Result: Success | | Details: None | +---------------------------+ | Check: Placement API | | Result: Success | | Details: None | +---------------------------+ | Check: Resource Providers | | Result: Success | | Details: None | +---------------------------+
nova知识点https://docs.openstack.org/nova/queens/admin/index.html
1.建立nuetron数据库和受权
mysql -u root -p CREATE DATABASE neutron; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
2.建立服务
. admin-openrc openstack user create --domain default --password-prompt neutron
添加admin角色为neutron用户
openstack role add --project service --user neutron admin
建立neutron服务
openstack service create --name neutron --description "OpenStack Networking" network
3.建立网络服务端点
openstack endpoint create --region RegionOne network public http://controller:9696 openstack endpoint create --region RegionOne network internal http://controller:9696 openstack endpoint create --region RegionOne network admin http://controller:9696
1.安装组件
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
2.配置服务组件,编辑 /etc/neutron/neutron.conf
[database] connection = mysql+pymysql://neutron:123456@controller/neutron [DEFAULT] auth_strategy = keystone core_plugin = ml2 service_plugins = transport_url = rabbit://openstack:123456@controller notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = 123456 [nova] auth_url = http://controller:35357 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp
编辑/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2] type_drivers = flat,vlan tenant_network_types = mechanism_drivers = linuxbridge extension_drivers = port_security [ml2_type_flat] flat_networks = provider [securitygroup] enable_ipset = true
编辑 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge] physical_interface_mappings = provider:ens6f0 [vxlan] enable_vxlan = false [securitygroup] enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
编辑 /etc/neutron/dhcp_agent.ini
[DEFAULT] interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true
编辑 /etc/neutron/metadata_agent.ini
[DEFAULT] nova_metadata_host = controller metadata_proxy_shared_secret = 123456
编辑/etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = true
metadata_proxy_shared_secret = 123456
1.建立服务软链接
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
2.同步数据库
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
3.重启compute API服务
systemctl restart openstack-nova-api.service
4.配置网络服务开机启动
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
1.安装组件
yum install openstack-neutron-linuxbridge ebtables ipset
2.配置公共组件
编辑/etc/neutron/neutron.conf
[DEFAULT] auth_strategy = keystone transport_url = rabbit://openstack:123456@controller [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp
1.配置Linux网桥,编辑 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge] physical_interface_mappings = provider:ens6f0 [vxlan] enable_vxlan = false [securitygroup] enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
编辑/etc/nova/nova.conf
[neutron] url = http://controller:9696 auth_url = http://controller:35357 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = 123456
1.重启compute服务
systemctl restart openstack-nova-compute.service
2.设置网桥服务开机启动
systemctl enable neutron-linuxbridge-agent.service systemctl start neutron-linuxbridge-agent.service
1.安装软件包
yum install openstack-dashboard -y
编辑/etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller" ALLOWED_HOSTS = ['*']
配置memcache会话存储
SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': 'controller:11211', } }
开启身份认证API 版本v3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
开启domains版本支持
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
配置API版本
OPENSTACK_API_VERSIONS = { "identity": 3, "image": 2, "volume": 2, } OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default" OPENSTACK_NEUTRON_NETWORK = { 'enable_router': False, 'enable_quotas': False, 'enable_distributed_router': False, 'enable_ha_router': False, 'enable_lb': False, 'enable_firewall': False, 'enable_vpn': False, 'enable_fip_topology_check': False, }
2.完成安装,重启web服务和会话存储
systemctl restart httpd.service memcached.service
在浏览器输入http://10.71.11.12/dashboard,访问openstack的web页面
default admin 123456
本节介绍如何为Block Storage服务安装和配置存储节点。 为简单起见,此配置使用空的本地块存储设备引用一个存储节点。
该服务使用LVM驱动程序在该设备上配置逻辑卷,并通过iSCSI传输将其提供给实例。 您可以按照这些说明进行小的修改,以便使用其他存储节点水平扩展您的环境。
1.安装支持的软件包
安装LVM
yum install lvm2 device-mapper-persistent-data
设置LVM服务开机启动
systemctl enable lvm2-lvmetad.service systemctl start lvm2-lvmetad.service
2.创建LVM物理卷/dev/sdb1
[root@cinder ~]# pvcreate /dev/sdb1 Device /dev/sdb not found (or ignored by filtering).
解决方案:
编辑 vim /etc/lvm/lvm.conf,找到global_filter一行,配置以下
global_filter = [ "a|.*/|","a|sdb1|"]
之后再执行pvcreate命令,问题解决。
[root@cinder ~]# pvcreate /dev/sdb1 Physical volume "/dev/sdb1" successfully created.
3.建立cinder-volumes逻辑卷组
[root@cinder ~]# vgcreate cinder-volumes /dev/sdb1 Volume group "cinder-volumes" successfully created
4.安装和配置组件
安装软件包
yum install openstack-cinder targetcli python-keystone -y
编辑/etc/cinder/cinder.conf
[DEFAULT] transport_url = rabbit://openstack:123456@controller auth_strategy = keystone my_ip = 10.71.11.14 enabled_backends = lvm glance_api_servers = http://controller:9292 [database] connection = mysql+pymysql://cinder:123456@controller/cinder [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_id = default user_domain_id = default project_name = service username = cinder password = 123456 在[lvm]部分中,使用LVM驱动程序,cinder-volumes卷组,iSCSI协议和相应的iSCSI服务配置LVM后端。 若是[lvm]部分不存在,请建立它: [lvm] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_group = cinder-volumes iscsi_protocol = iscsi iscsi_helper = lioadm [oslo_concurrency] lock_path = /var/lib/cinder/tmp
设置存储服务开机启动
systemctl enable openstack-cinder-volume.service target.service systemctl start openstack-cinder-volume.service target.service
[root@controller ~]# openstack image create --disk-format qcow2 --container-format bare --public --file /root/CentOS-7-x86_64-Minimal-1708.iso CentOS-7-x86_64
. admin-openrc openstack network create --share --external --provider-physical-network provider --provider-network-type flat provider
参数
--share 允许所有项目使用虚拟网络
--external 定义外部虚拟网络;如果需要创建内部网络,使用 --internal
--provider-physical-network provider && --provider-network-type flat 连接flat 虚拟网络
openstack subnet create --network provider --allocation-pool start=10.71.11.50,end=10.71.11.60 --dns-nameserver 114.114.114.114 --gateway 10.71.11.254 --subnet-range 10.71.11.0/24 provider
openstack flavor create --id 1 --vcpus 4 --ram 128 --disk 1 m2.nano
. demo-openrc ssh-keygen -q -N "" openstack keypair create --public-key ~/.ssh/id_rsa.pub liukey
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
openstack flavor list