OpenStack Queens Detailed Installation and Deployment (Part 1): Basic Configuration

I. System Settings

1. NIC configuration (run on all nodes)

[root@DT_Node-172_17_7_1 ~]# echo "TYPE=Ethernet
NM_CONTROLLED=no
BOOTPROTO=none
NAME=eth1
DEVICE=eth1
ONBOOT=yes
HWADDR=$(ip link show eth1|awk 'NR==2{print $2}')" > /etc/sysconfig/network-scripts/ifcfg-eth1
[root@DT_Node-172_17_7_1 ~]# systemctl restart network
[root@DT_Node-172_17_7_1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:ba:06:db brd ff:ff:ff:ff:ff:ff
    inet 172.17.7.1/21 brd 172.17.7.255 scope global dynamic eth0
       valid_lft 451sec preferred_lft 451sec
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:ba:39:c0 brd ff:ff:ff:ff:ff:ff
[root@DT_Node-172_17_7_1 ~]# 

2. /etc/hosts configuration (run on all nodes)

[root@DT_Node-172_17_7_1 ~]# echo '
# Controller-Node
172.17.7.1        controller1
172.17.7.2        controller2
172.17.7.3        controller3
172.17.7.100      controller           #HA VIP

# Compute-Node & Storage-Node
172.17.7.4        compute1    ceph-osd1
172.17.7.5        compute2    ceph-osd2
172.17.7.6        compute3    ceph-osd3
172.17.7.7        compute4    ceph-osd4

' >> /etc/hosts
[root@DT_Node-172_17_7_1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

# Controller-Node
172.17.7.1        controller1
172.17.7.2        controller2
172.17.7.3        controller3
172.17.7.100      controller           #HA VIP

# Compute-Node & Storage-Node
172.17.7.4        compute1    ceph-osd1
172.17.7.5        compute2    ceph-osd2
172.17.7.6        compute3    ceph-osd3
172.17.7.7        compute4    ceph-osd4
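
As an optional sanity check, each alias can be resolved with getent to confirm the entries took effect, for example:

# optional: verify that every alias resolves to the expected address
for h in controller controller{1..3} compute{1..4} ceph-osd{1..4}; do
    getent hosts "$h" || echo "unresolved: $h"
done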

3. NTP time synchronization (all nodes)

[root@DT_Node-172_17_7_1 ~]# crontab -l && date

*/5 * * * * /usr/sbin/ntpdate -u ntp.dtops.cc >/dev/null 2>&1
*/1 * * * * /usr/sbin/ss  -tan|awk 'NR>1{++S[$1]}END{for (a in S) print a,S[a]}' > /tmp/tcp-status.txt
*/1 * * * * /usr/sbin/ss -o state established '( dport = :http or sport = :http )' |grep -v Netid > /tmp/httpNUB.txt
Sun Sep  9 17:04:29 CST 2018
[root@DT_Node-172_17_7_1 ~]# 
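To spot-check that the cron job keeps the clock in sync, a one-off query against the same NTP server can be run manually (query only, nothing is changed), for example:

/usr/sbin/ntpdate -q ntp.dtops.cc     # query only: prints the current offset without stepping the clock
date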

4. Install OpenStack packages (all nodes)

[root@DT_Node-172_17_7_1 ~]# yum install -y centos-release-openstack-queens       # install the Queens yum repository
[root@DT_Node-172_17_7_1 ~]# yum upgrade -y                                       # update the system
[root@DT_Node-172_17_7_1 ~]# yum install -y python-openstackclient                # install the OpenStack client
[root@DT_Node-172_17_7_1 ~]# yum install -y openstack-selinux                     # only required when SELinux is enabled; SELinux has already been disabled by default here
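
Since openstack-selinux is skipped here because SELinux is disabled, a minimal sketch of how that disabled state is typically set and verified on CentOS 7 (an assumption, shown for completeness) looks like this:

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config   # persist the disabled state across reboots
setenforce 0 2>/dev/null || true                               # drop a still-enforcing system to permissive until the next reboot
getenforce                                                     # expect Permissive or Disabled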

5. Configure passwordless SSH between nodes (all nodes)

[root@DT_Node-172_17_7_1 ~]# ssh-keygen  -t rsa -f ~/.ssh/id_rsa  -P ''     # generate the key pair
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:tEDd0QYCTN0/9+LHC7LiC7wz0zxf9ENolWI5LdNnWTU root@DS-VM-Node172_17_7_1.cluster.com
The key's randomart image is:
+---[RSA 2048]----+
|     o++.oo+   E+|
|     .. o.o o + =|
|      . .  o B *o|
|       o .  + O..|
|        S    * o |
|      .     o + .|
|       oo  . + = |
|       +o=  + o +|
|       .=o=o   o.|
+----[SHA256]-----+
[root@DT_Node-172_17_7_1 ~]# for i in 1 2 3 4 5 6 7;do ssh-copy-id -p22992 -i ~/.ssh/id_rsa.pub root@172.17.7.$i; done
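
Once the key has been copied, a quick loop over the same custom SSH port (22992) confirms passwordless login works on every node, for example:

for i in 1 2 3 4 5 6 7; do
    ssh -p22992 -o BatchMode=yes root@172.17.7.$i hostname   # BatchMode fails fast instead of prompting for a password
done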

6. Firewall rules

# Simple static firewall loaded by iptables.service. Replace
# this with your own custom rules, run lokkit, or switch to 
# shorewall or firewalld as your needs dictate.
*filter
:INPUT DROP [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 22,22992 -m comment --comment "SSH_PORT" -j ACCEPT
-A INPUT -s 180.150.132.57/32             -p tcp -m tcp -m state --state NEW -m multiport --dports 22,22992 -m comment --comment "Rackspace_Physical_Host_Public_VPN_Jumphost_SSH_PORT" -j ACCEPT

# mariadb
# tcp3306: service listen port; tcp&udp4567: replication traffic (multicast replication uses both TCP and UDP); tcp4568: Incremental State Transfer; tcp4444: other State Snapshot Transfers; tcp9200: health check
-A INPUT -s 172.17.0.0/21                 -p tcp -m tcp -m state --state NEW -m multiport --dports 3306,4444,4567:4568 -m comment --comment "MariaDB_Cluster_Port" -j ACCEPT
-A INPUT -s 172.17.0.0/21                 -p udp -m udp -m state --state NEW -m multiport --dports 4567 -m comment --comment "MariaDB_Cluster_UDP-Port" -j ACCEPT
-A INPUT -s 172.17.0.0/21                 -p tcp -m tcp -m state --state NEW -m multiport --dports 9200 -m comment --comment "MariaDB_Cluster_Check_Port" -j ACCEPT

# rabbitmq
# tcp4369: cluster peer discovery (epmd);
# tcp5671,5672: used by AMQP 0.9.1 and 1.0 clients;
# tcp5673: not a RabbitMQ default port; used here as the HAProxy frontend listener to avoid the bind conflict when a backend service and HAProxy run on the same node; not needed if RabbitMQ's own cluster mechanism is used directly;
# tcp15672: HTTP API and rabbitmqadmin access (the latter only when the management plugin is enabled);
# tcp25672: Erlang distribution (inter-node and CLI tool communication)
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 4369,5671:5673,15672:15673,25672 -m comment --comment "OpenStack_RabbitMQ_Port" -j ACCEPT

# pcs
# tcp2224: pcsd web management port (resources can be created, viewed and deleted via the web UI; the port is set in /usr/lib/pcsd/ssl.rb);
# udp5405: corosync cluster multicast communication port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 2224 -m comment --comment "OpenStack_PCS_WEB_Manage" -j ACCEPT
-A INPUT                                  -p udp -m udp -m state --state NEW -m multiport --dports 5404:5405 -m comment --comment "OpenStack_PCS_Corosync_Port" -j ACCEPT

# HAproxy
# tcp1080: HAProxy stats web port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 1080 -m comment --comment "OpenStack_HAproxy_WEB_Proxy" -j ACCEPT

# keystone
# tcp35357: admin API port;
# tcp5000: public/internal API port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 35357 -m comment --comment "OpenStack_Keystone_Admin-API_Prot" -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 5000 -m comment --comment "OpenStack_Keystone_PUB-API_Prot" -j ACCEPT

# glance
# tcp9191: glance-registry port;
# tcp9292: glance-api port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 9191 -m comment --comment "OpenStack_Glance-Registry_Prot" -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 9292 -m comment --comment "OpenStack_Glance-API_Prot" -j ACCEPT

# nova
# tcp8773: nova EC2 API port;
# tcp8774: nova compute API port;
# tcp8775: nova metadata API port;
# tcp8778: placement API port;
# tcp6080: VNC proxy port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 8773:8775 -m comment --comment "OpenStack_nova-ec2+compute+metadata-api_Prot" -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 8778 -m comment --comment "OpenStack_placement-api_Prot" -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 6080 -m comment --comment "OpenStack_vncproxy_Prot" -j ACCEPT

# neutron
# tcp9696: neutron API port;
# udp4789: VXLAN destination port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 9696 -m comment --comment "OpenStack_neutron-api_Prot" -j ACCEPT
-A INPUT                                  -p udp -m udp -m state --state NEW -m multiport --dports 4789 -m comment --comment "OpenStack_vxlan_Prot" -j ACCEPT

# dashboard
# tcp80: dashboard (horizon) listen port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 80 -m comment --comment "OpenStack_dashboard_Prot" -j ACCEPT

# cinder
# tcp8776: cinder API port
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 8776 -m comment --comment "OpenStack_cinder-api_Prot" -j ACCEPT

# ceph
# tcp6789: ceph-mon port;
# tcp6800-7300: ceph-osd ports;
# tcp8443: ceph dashboard (web)
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 6789  -m comment --comment "Ceph_MON_Port" -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 6800:7300  -m comment --comment "Ceph_OSD_Port" -j ACCEPT
-A INPUT                                  -p tcp -m tcp -m state --state NEW -m multiport --dports 8443  -m comment --comment "Ceph_Dashboard_Port" -j ACCEPT

-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
COMMIT
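
This ruleset is meant to be loaded by iptables.service; assuming it has been saved as /etc/sysconfig/iptables and that iptables-services has replaced firewalld (as noted in section II.2 below), a minimal sketch of applying it is:

yum install -y iptables-services                          # assumption: classic iptables service instead of firewalld
systemctl stop firewalld 2>/dev/null; systemctl disable firewalld 2>/dev/null
systemctl enable iptables && systemctl restart iptables   # loads /etc/sysconfig/iptables
iptables -nvL --line-numbers | head                       # confirm the INPUT policy is DROP and the rules are in place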

7. HAProxy configuration file

[root@DT_Node-172_17_7_1 ~]# cat /etc/haproxy/haproxy.cfg
global
  log 127.0.0.1 local0
  log 127.0.0.1 local1 notice
  chroot  /var/lib/haproxy
  daemon
  group  haproxy
  user  haproxy
  maxconn  4000
  pidfile  /var/run/haproxy.pid

defaults
  log  global
  maxconn  4000
  option  redispatch
  retries  3
  timeout  http-request 10s
  timeout  queue 1m
  timeout  connect 10s
  timeout  client 1m
  timeout  server 1m
  timeout  check 10s

# HAProxy stats page
listen stats
  bind 0.0.0.0:1080
  log 127.0.0.1 local0 err
  maxconn 10      # maximum connections
  mode http
  stats enable
  stats uri /
  stats realm OpenStack\ Haproxy
  stats auth admin:admin
  stats  refresh 30s
  stats  show-node
  stats  show-legends
  stats  hide-version
  stats admin if TRUE      # allow manual enable/disable of backends

# MariaDB service;
# controller1 is the active backend and controller2/3 are backups; a single-writer, multi-backup layout avoids data inconsistency;
listen galera_cluster
  bind 172.17.7.100:3306
  balance  source
  mode    tcp
  option httpchk
  server controller1 172.17.7.1:3306 check port 9200 inter 5000 fastinter 2000 rise 2 fall 2
  server controller2 172.17.7.2:3306 backup check port 9200 inter 5000 fastinter 2000 rise 2 fall 2
  server controller3 172.17.7.3:3306 backup check port 9200 inter 5000 fastinter 2000 rise 2 fall 2

listen galera_check
   bind 172.17.7.100:9200
   mode tcp
   option tcpka
   balance roundrobin
   timeout client  3h
   timeout server  3h
   option  clitcpka
   server controller1 172.17.7.1:9200 check inter 10s rise 2 fall 5
   server controller2 172.17.7.2:9200 check inter 10s rise 2 fall 5
   server controller3 172.17.7.3:9200 check inter 10s rise 2 fall 5

# Provide an HA cluster access port to RabbitMQ for the OpenStack services;
# if the OpenStack services connect to the RabbitMQ cluster directly, this load-balancing block can be omitted
 listen rabbitmq_cluster
   bind 172.17.7.100:5673
   mode tcp
   option tcpka
   balance roundrobin
   timeout client  3h
   timeout server  3h
   option  clitcpka
   server controller1 172.17.7.1:5672 check inter 10s rise 2 fall 5
   server controller2 172.17.7.2:5672 check inter 10s rise 2 fall 5
   server controller3 172.17.7.3:5672 check inter 10s rise 2 fall 5

# keystone admin/internal API
 listen keystone_admin_cluster
  bind 172.17.7.100:35357
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 172.17.7.1:35357 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:35357 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:35357 check inter 2000 rise 2 fall 5

# keystone public API
 listen keystone_public_cluster
  bind 172.17.7.100:5000
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 172.17.7.1:5000 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:5000 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:5000 check inter 2000 rise 2 fall 5

# glance API
 listen glance_api_cluster
  bind 172.17.7.100:9292
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 172.17.7.1:9292 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:9292 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:9292 check inter 2000 rise 2 fall 5

# glance registry
 listen glance_registry_cluster
  bind 172.17.7.100:9191
  balance  source
  option  tcpka
  option  tcplog
  server controller1 172.17.7.1:9191 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:9191 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:9191 check inter 2000 rise 2 fall 5

 listen nova_compute_api_cluster
  bind 172.17.7.100:8774
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 172.17.7.1:8774 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:8774 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:8774 check inter 2000 rise 2 fall 5

 listen nova_placement_cluster
  bind 172.17.7.100:8778
  balance  source
  option  tcpka
  option  tcplog
  server controller1 172.17.7.1:8778 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:8778 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:8778 check inter 2000 rise 2 fall 5

 listen nova_metadata_api_cluster
  bind 172.17.7.100:8775
  balance  source
  option  tcpka
  option  tcplog
  server controller1 172.17.7.1:8775 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:8775 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:8775 check inter 2000 rise 2 fall 5

 listen nova_vncproxy_cluster
  bind 172.17.7.100:6080
  balance  source
  option  tcpka
  option  tcplog
  server controller1 172.17.7.1:6080 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:6080 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:6080 check inter 2000 rise 2 fall 5

 listen neutron_api_cluster
  bind 172.17.7.100:9696
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 172.17.7.1:9696 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:9696 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:9696 check inter 2000 rise 2 fall 5

# horizon (dashboard)
 listen dashboard_cluster
  bind 172.17.7.100:80
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server controller1 172.17.7.1:80 check inter 2000 rise 2 fall 5
  server controller2 172.17.7.2:80 check inter 2000 rise 2 fall 5
  server controller3 172.17.7.3:80 check inter 2000 rise 2 fall 5

listen cinder_api_cluster
 bind 172.17.7.100:8776
 balance  source
 option  tcpka
 option  httpchk
 option  tcplog
 server controller1 172.17.7.1:8776 check inter 2000 rise 2 fall 5
 server controller2 172.17.7.2:8776 check inter 2000 rise 2 fall 5
 server controller3 172.17.7.3:8776 check inter 2000 rise 2 fall 5
[openstack-admin]-[root@DT_Node-172_17_7_1 ~]#
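
Before HAProxy is handed over to the cluster manager, the configuration can be syntax-checked and each node allowed to bind the VIP (172.17.7.100) even while it does not own it; a hedged sketch (assuming the VIP itself is managed separately, e.g. by pcs later in this series):

echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf   # allow binding to an address not yet present on this node
sysctl -p
haproxy -c -f /etc/haproxy/haproxy.cfg                     # configuration syntax check only
systemctl enable haproxy && systemctl restart haproxy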

II. MariaDB Cluster Configuration

1. Install MariaDB Galera (all controller nodes)

[root@DT_Node-172_17_7_1 ~]# bash -c "$(curl -Lks onekey.sh/mariadb_galera)"   # installed here with a one-click script I wrote earlier
Please input the root password of database: YTI1MTg4NGZiMGEzZTZmYTEw    # set the same root password on every node
[root@DT_Node-172_17_7_1 ~]# . /etc/profile.d/mariadb.sh
[root@DT_Node-172_17_7_1 ~]# mysql -uroot -pYTI1MTg4NGZiMGEzZTZmYTEw -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'YTI1MTg4NGZiMGEzZTZmYTEw' WITH GRANT OPTION;FLUSH PRIVILEGES;"
[root@DT_Node-172_17_7_1 ~]# mysql -uroot -pYTI1MTg4NGZiMGEzZTZmYTEw -e "GRANT ALL PRIVILEGES ON *.* TO 'replication'@'%' IDENTIFIED BY 'MWIxOWIzZWEyZjY5N2ZjMGI0';"
[root@DT_Node-172_17_7_1 ~]# mysql -uroot -pYTI1MTg4NGZiMGEzZTZmYTEw -e "SELECT User,Password,Host FROM mysql.user;"                                                                                      
+-------------+-------------------------------------------+-------------------------------------+
| User        | Password                                  | Host                                |
+-------------+-------------------------------------------+-------------------------------------+
| root        | *294E82CD7B984FDC876A31730BE3D6AFCCE8F2B8 | localhost                           |
| root        | *294E82CD7B984FDC876A31730BE3D6AFCCE8F2B8 | ds-vm-node172\_17\_7\_1.cluster.com |
| root        | *294E82CD7B984FDC876A31730BE3D6AFCCE8F2B8 | 127.0.0.1                           |
| root        | *294E82CD7B984FDC876A31730BE3D6AFCCE8F2B8 | ::1                                 |
| root        | *294E82CD7B984FDC876A31730BE3D6AFCCE8F2B8 | %                                   |
| replication | *CA1AC595BA3869448098481FB70496E9D15B814D | %                                   |
+-------------+-------------------------------------------+-------------------------------------+
[root@DT_Node-172_17_7_1 ~]# chkconfig mysqld on && chkconfig 

Note: this output shows SysV services only and does not include
native systemd services. SysV configuration data
might be overridden by native systemd configuration.

      To list systemd services, run 'systemctl list-unit-files'.
      To see services enabled in a particular target, run
      'systemctl list-dependencies [target]'.

mysqld          0:off   1:off   2:on    3:on    4:on    5:on    6:off
netconsole      0:off   1:off   2:off   3:off   4:off   5:off   6:off
network         0:off   1:off   2:on    3:on    4:on    5:on    6:off
[root@DT_Node-172_17_7_2 ~]# 

2. Configure the iptables firewall (firewalld was already replaced with iptables during system initialization) (run on all controller nodes)

[root@DT_Node-172_17_7_1 ~]# grep OpenStack_Galare_Port /etc/sysconfig/iptables
-A INPUT -s 172.17.7.1/32                 -p tcp -m tcp -m state --state NEW -m multiport --dports 3306,4444,4567,4568 -m comment --comment "OpenStack_Galare_Port" -j ACCEPT
-A INPUT -s 172.17.7.2/32                 -p tcp -m tcp -m state --state NEW -m multiport --dports 3306,4444,4567,4568 -m comment --comment "OpenStack_Galare_Port" -j ACCEPT
-A INPUT -s 172.17.7.3/32                 -p tcp -m tcp -m state --state NEW -m multiport --dports 3306,4444,4567,4568 -m comment --comment "OpenStack_Galare_Port" -j ACCEPT
-A INPUT -s 172.17.7.1/32                 -p udp -m udp -m state --state NEW -m multiport --dports 4567 -m comment --comment "OpenStack_Galare_Port" -j ACCEPT
-A INPUT -s 172.17.7.2/32                 -p udp -m udp -m state --state NEW -m multiport --dports 4567 -m comment --comment "OpenStack_Galare_Port" -j ACCEPT
-A INPUT -s 172.17.7.3/32                 -p udp -m udp -m state --state NEW -m multiport --dports 4567 -m comment --comment "OpenStack_Galare_Port" -j ACCEPT
[root@DT_Node-172_17_7_1 ~]# iptables -nvxL --lin | grep OpenStack_Galare_Port
6           0        0 ACCEPT     tcp  --  *      *       172.17.7.1           0.0.0.0/0            tcp state NEW multiport dports 3306,4444,4567,4568 /* OpenStack_Galare_Port */
7           0        0 ACCEPT     tcp  --  *      *       172.17.7.2           0.0.0.0/0            tcp state NEW multiport dports 3306,4444,4567,4568 /* OpenStack_Galare_Port */
8           0        0 ACCEPT     tcp  --  *      *       172.17.7.3           0.0.0.0/0            tcp state NEW multiport dports 3306,4444,4567,4568 /* OpenStack_Galare_Port */
9           0        0 ACCEPT     udp  --  *      *       172.17.7.1           0.0.0.0/0            udp state NEW multiport dports 4567 /* OpenStack_Galare_Port */
10          0        0 ACCEPT     udp  --  *      *       172.17.7.2           0.0.0.0/0            udp state NEW multiport dports 4567 /* OpenStack_Galare_Port */
11          0        0 ACCEPT     udp  --  *      *       172.17.7.3           0.0.0.0/0            udp state NEW multiport dports 4567 /* OpenStack_Galare_Port */
[root@DT_Node-172_17_7_1 ~]#

3. Configure the Galera cluster

a. Edit the configuration file and bootstrap the cluster (on controller1)

[root@DT_Node-172_17_7_1 ~]# egrep -v '^$|^#' /etc/my.cnf| egrep 'wsrep_cluster_address|wsrep_cluster_name|wsrep_node_address|wsrep_node_name|wsrep_sst_auth'
wsrep_cluster_address="gcomm://"
wsrep_cluster_name='OpenStack_Galare_Cluster1'
wsrep_node_address='172.17.7.1'
wsrep_node_name='node1'
wsrep_sst_auth=replication:MWIxOWIzZWEyZjY5N2ZjMGI0
[root@DT_Node-172_17_7_1 ~]#
[root@DT_Node-172_17_7_1 ~]# service mysqld restart 
Shutting down MySQL..... SUCCESS! 
Starting MySQL.180909 18:17:39 mysqld_safe Adding '/usr/local/lib/libjemalloc.so' to LD_PRELOAD for mysqld
180909 18:17:39 mysqld_safe Logging to '/data/mariadb/mysql-error.log'.
180909 18:17:39 mysqld_safe Starting mysqld daemon with databases from /data/mariadb
. SUCCESS! 
[root@DT_Node-172_17_7_1 ~]# 

b. Edit the configuration file (on controller2)

[root@DT_Node-172_17_7_2 ~]# egrep -v '^$|^#' /etc/my.cnf| egrep 'wsrep_cluster_address|wsrep_cluster_name|wsrep_node_address|wsrep_node_name|wsrep_sst_auth'
wsrep_cluster_address="gcomm://172.17.7.1,172.17.7.2,172.17.7.3"
wsrep_cluster_name='OpenStack_Galare_Cluster1'
wsrep_node_address='172.17.7.2'
wsrep_node_name='node2'
wsrep_sst_auth=replication:MWIxOWIzZWEyZjY5N2ZjMGI0
[root@DT_Node-172_17_7_2 ~]# service mysqld restart      
 ERROR! MySQL server PID file could not be found!
Starting MySQL.180909 18:19:50 mysqld_safe Adding '/usr/local/lib/libjemalloc.so' to LD_PRELOAD for mysqld
180909 18:19:50 mysqld_safe Logging to '/data/mariadb/mysql-error.log'.
180909 18:19:50 mysqld_safe Starting mysqld daemon with databases from /data/mariadb
..SST in progress, setting sleep higher... SUCCESS! 
[root@DT_Node-172_17_7_2 ~]# 

c. Edit the configuration file (on controller3)

[root@DT_Node-172_17_7_3 ~]# egrep -v '^$|^#' /etc/my.cnf| egrep 'wsrep_cluster_address|wsrep_cluster_name|wsrep_node_address|wsrep_node_name|wsrep_sst_auth'
wsrep_cluster_address="gcomm://172.17.7.1,172.17.7.2,172.17.7.3"
wsrep_cluster_name='OpenStack_Galare_Cluster1'
wsrep_node_address='172.17.7.3'
wsrep_node_name='node3'
wsrep_sst_auth=replication:MWIxOWIzZWEyZjY5N2ZjMGI0
[root@DT_Node-172_17_7_3 ~]# service mysqld restart 
Shutting down MySQL...... SUCCESS! 
Starting MySQL.180909 18:23:46 mysqld_safe Adding '/usr/local/lib/libjemalloc.so' to LD_PRELOAD for mysqld
180909 18:23:46 mysqld_safe Logging to '/data/mariadb/mysql-error.log'.
180909 18:23:46 mysqld_safe Starting mysqld daemon with databases from /data/mariadb
..SST in progress, setting sleep higher... SUCCESS! 
[root@DT_Node-172_17_7_3 ~]# 

d. Update controller1's configuration file one last time (on controller1)

[root@DT_Node-172_17_7_1 ~]# 
[root@DT_Node-172_17_7_1 ~]# sed -i 's@^wsrep_cluster_address.*@wsrep_cluster_address="gcomm://172.17.7.1,172.17.7.2,172.17.7.3"@' /etc/my.cnf
[root@DT_Node-172_17_7_1 ~]# egrep -v '^$|^#' /etc/my.cnf| egrep 'wsrep_cluster_address'                                                      
wsrep_cluster_address="gcomm://172.17.7.1,172.17.7.2,172.17.7.3"
[root@DT_Node-172_17_7_1 ~]# service mysqld restart 
Shutting down MySQL..... SUCCESS! 
Starting MySQL.180909 18:27:34 mysqld_safe Adding '/usr/local/lib/libjemalloc.so' to LD_PRELOAD for mysqld
180909 18:27:34 mysqld_safe Logging to '/data/mariadb/mysql-error.log'.
180909 18:27:34 mysqld_safe Starting mysqld daemon with databases from /data/mariadb
. SUCCESS! 
[root@DT_Node-172_17_7_1 ~]#

e. Check the MariaDB Galera cluster status

[root@DT_Node-172_17_7_1 ~]# mysql -uroot -pYTI1MTg4NGZiMGEzZTZmYTEw -e "SHOW STATUS LIKE '%wsrep%';"               
+------------------------------+--------------------------------------------------+
| Variable_name                | Value                                            |
+------------------------------+--------------------------------------------------+
| wsrep_local_state_uuid       | ebcf1e56-b413-11e8-8e7a-0666ba590539             |
| wsrep_protocol_version       | 7                                                |
| wsrep_last_committed         | 18                                               |
| wsrep_replicated             | 0                                                |
| wsrep_replicated_bytes       | 0                                                |
| wsrep_repl_keys              | 0                                                |
| wsrep_repl_keys_bytes        | 0                                                |
| wsrep_repl_data_bytes        | 0                                                |
| wsrep_repl_other_bytes       | 0                                                |
| wsrep_received               | 2                                                |
| wsrep_received_bytes         | 273                                              |
| wsrep_local_commits          | 0                                                |
| wsrep_local_cert_failures    | 0                                                |
| wsrep_local_replays          | 0                                                |
| wsrep_local_send_queue       | 0                                                |
| wsrep_local_send_queue_max   | 1                                                |
| wsrep_local_send_queue_min   | 0                                                |
| wsrep_local_send_queue_avg   | 0.000000                                         |
| wsrep_local_recv_queue       | 0                                                |
| wsrep_local_recv_queue_max   | 1                                                |
| wsrep_local_recv_queue_min   | 0                                                |
| wsrep_local_recv_queue_avg   | 0.000000                                         |
| wsrep_local_cached_downto    | 18446744073709551615                             |
| wsrep_flow_control_paused_ns | 0                                                |
| wsrep_flow_control_paused    | 0.000000                                         |
| wsrep_flow_control_sent      | 0                                                |
| wsrep_flow_control_recv      | 0                                                |
| wsrep_cert_deps_distance     | 0.000000                                         |
| wsrep_apply_oooe             | 0.000000                                         |
| wsrep_apply_oool             | 0.000000                                         |
| wsrep_apply_window           | 0.000000                                         |
| wsrep_commit_oooe            | 0.000000                                         |
| wsrep_commit_oool            | 0.000000                                         |
| wsrep_commit_window          | 0.000000                                         |
| wsrep_local_state            | 4                                                |
| wsrep_local_state_comment    | Synced                                           |
| wsrep_cert_index_size        | 0                                                |
| wsrep_causal_reads           | 0                                                |
| wsrep_cert_interval          | 0.000000                                         |
| wsrep_incoming_addresses     | 172.17.7.3:3306,172.17.7.2:3306,172.17.7.1:3306  |
| wsrep_desync_count           | 0                                                |
| wsrep_evs_delayed            |                                                  |
| wsrep_evs_evict_list         |                                                  |
| wsrep_evs_repl_latency       | 0.000364925/0.000667352/0.00109162/0.000308928/3 |
| wsrep_evs_state              | OPERATIONAL                                      |
| wsrep_gcomm_uuid             | fa2d022a-b41a-11e8-9756-afee0ead45e3             |
| wsrep_cluster_conf_id        | 5                                                |
| wsrep_cluster_size           | 3                                                |
| wsrep_cluster_state_uuid     | ebcf1e56-b413-11e8-8e7a-0666ba590539             |
| wsrep_cluster_status         | Primary                                          |
| wsrep_connected              | ON                                               |
| wsrep_local_bf_aborts        | 0                                                |
| wsrep_local_index            | 2                                                |
| wsrep_provider_name          | Galera                                           |
| wsrep_provider_vendor        | Codership Oy <info@codership.com>                |
| wsrep_provider_version       | 25.3.22(r3764)                                   |
| wsrep_ready                  | ON                                               |
| wsrep_thread_count           | 17                                               |
+------------------------------+--------------------------------------------------+
[root@DT_Node-172_17_7_1 ~]# 

f. Set up the MariaDB health check (clustercheck)

f.1. Prepare the clustercheck script (on all database nodes)

[root@DT_Node-172_17_7_1 ~]# curl -Lk https://raw.githubusercontent.com/olafz/percona-clustercheck/master/clustercheck > /usr/bin/clustercheck
[root@DT_Node-172_17_7_1 ~]# chmod +x /usr/bin/clustercheck
[root@DT_Node-172_17_7_2 ~]# sed -ri 's/^(MYSQL_USERNAME="\$\{MYSQL_USERNAME).*(}")/\1-clustercheckuser\2/' /usr/bin/clustercheck
[root@DT_Node-172_17_7_1 ~]# sed -ri 's/(MYSQL_PASSWORD="\$\{MYSQL_PASSWORD-).*(}")/\1OTNiYTEyMjU3M2I2NDQ2MjUz\2/' /usr/bin/clustercheck
# if your mysql binary is not in the script's default PATH, set PATH explicitly in clustercheck
[root@DT_Node-172_17_7_3 ~]# head -15 /usr/bin/clustercheck
#!/bin/bash
#
# Script to make a proxy (ie HAProxy) capable of monitoring Percona XtraDB Cluster nodes properly
#
# Author: Olaf van Zandwijk <olaf.vanzandwijk@nedap.com>
# Author: Raghavendra Prabhu <raghavendra.prabhu@percona.com>
#
# Documentation and download: https://github.com/olafz/percona-clustercheck
#
# Based on the original script from Unai Rodriguez
#

PATH=/usr/local/mariadb/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin

if [[ $1 == '-h' || $1 == '--help' ]];then
[root@DT_Node-172_17_7_3 ~]# 
# if you are not using the package's default credentials, write the configuration file below
[root@DT_Node-172_17_7_3 ~]# echo 'MYSQL_USERNAME="clustercheckuser"
MYSQL_PASSWORD="OTNiYTEyMjU3M2I2NDQ2MjUz"
MYSQL_HOST="localhost"
MYSQL_PORT="3306"' > /etc/sysconfig/clustercheck
[root@DT_Node-172_17_7_3 ~]# 

f.2. Create the health-check user (on any database node)

[root@DT_Node-172_17_7_1 ~]# mysql -uroot -pYTI1MTg4NGZiMGEzZTZmYTEw -e "GRANT PROCESS ON *.* TO 'clustercheckuser'@'localhost' IDENTIFIED BY 'OTNiYTEyMjU3M2I2NDQ2MjUz';FLUSH PRIVILEGES;"
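
The grant can be confirmed by logging in with the new credentials, which is exactly what clustercheck does internally:

mysql -uclustercheckuser -pOTNiYTEyMjU3M2I2NDQ2MjUz -e "SHOW STATUS LIKE 'wsrep_local_state_comment';"   # expect Synced on a healthy node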

f.3. Health-check xinetd configuration file (on all database nodes)

[root@DT_Node-172_17_7_1 ~]# cat /etc/xinetd.d/mysqlchk
service mysqlchk
{
   bind = 172.17.7.1
   port = 9200
   disable = no
   socket_type = stream
   protocol = tcp
   wait = no
   user = root
   group = root
   groups = yes
   server = /usr/bin/clustercheck
   type = UNLISTED
   per_source = UNLIMITED
   log_on_success =
   log_on_failure = HOST
   flags = REUSE
}
[root@DT_Node-172_17_7_1 ~]# 

f.4. Start the health-check service (on all database nodes)

[root@DT_Node-172_17_7_1 ~]# yum install -y xinetd
[root@DT_Node-172_17_7_1 ~]# sed -i 's/.*9200\/tcp.*/#&\nmysqlchk        9200\/tcp                # mysqlchk/' /etc/services
[root@DT_Node-172_17_7_1 ~]# systemctl daemon-reload
[root@DT_Node-172_17_7_1 ~]# systemctl enable xinetd
[root@DT_Node-172_17_7_1 ~]# systemctl start xinetd
[root@DT_Node-172_17_7_1 ~]# ss -tnl| grep 9200
LISTEN     0      64           *:9200                     *:*                  
[root@DT_Node-172_17_7_1 ~]# 

f.5. Test the health check

[root@DT_Node-172_17_7_1 ~]# clustercheck 
HTTP/1.1 200 OK
Content-Type: text/plain
Connection: close
Content-Length: 40

Percona XtraDB Cluster Node is synced.
[root@DT_Node-172_17_7_1 ~]#
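
Since mysqlchk is exposed by xinetd on TCP 9200 and answers with a plain HTTP response, the same check can also be exercised remotely with curl (this is what the HAProxy "check port 9200" option relies on), for example:

curl -is http://172.17.7.1:9200     # expect HTTP/1.1 200 OK and the "synced" message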

III. Install the RabbitMQ Cluster

1. Install RabbitMQ (on all controller nodes)

[root@DT_Node-172_17_7_1 ~]# yum install erlang rabbitmq-server -y
[root@DT_Node-172_17_7_1 ~]# systemctl enable rabbitmq-server.service 

Configure firewall rules (on all controller nodes)

[root@DT_Node-172_17_7_1 ~]# grep RabbitMQ /etc/sysconfig/iptables
-A INPUT -s 172.17.7.1/32                 -p tcp -m tcp -m state --state NEW -m multiport --dports 4369,5671:5673,15672:15673,25672 -m comment --comment "OpenStack_RabbitMQ_Port" -j ACCEPT
-A INPUT -s 172.17.7.2/32                 -p tcp -m tcp -m state --state NEW -m multiport --dports 4369,5671:5673,15672:15673,25672 -m comment --comment "OpenStack_RabbitMQ_Port" -j ACCEPT
-A INPUT -s 172.17.7.3/32                 -p tcp -m tcp -m state --state NEW -m multiport --dports 4369,5671:5673,15672:15673,25672 -m comment --comment "OpenStack_RabbitMQ_Port" -j ACCEPT
[root@DT_Node-172_17_7_1 ~]# iptables -nvxL --lin | grep RabbitMQ 
12          0        0 ACCEPT     tcp  --  *      *       172.17.7.1           0.0.0.0/0            tcp state NEW multiport dports 4369,5671:5673,15672:15673,25672 /* OpenStack_RabbitMQ_Port */
13          0        0 ACCEPT     tcp  --  *      *       172.17.7.2           0.0.0.0/0            tcp state NEW multiport dports 4369,5671:5673,15672:15673,25672 /* OpenStack_RabbitMQ_Port */
14          0        0 ACCEPT     tcp  --  *      *       172.17.7.3           0.0.0.0/0            tcp state NEW multiport dports 4369,5671:5673,15672:15673,25672 /* OpenStack_RabbitMQ_Port */
[root@DT_Node-172_17_7_1 ~]# 

2. Build the RabbitMQ cluster

a.1. Start RabbitMQ on any one RabbitMQ node (controller1 is used here)

[root@DT_Node-172_17_7_1 ~]# systemctl start rabbitmq-server.service
[root@DT_Node-172_17_7_1 ~]# rabbitmqctl cluster_status
Cluster status of node 'rabbit@DS-VM-Node172_17_7_1' ...
[{nodes,[{disc,['rabbit@DS-VM-Node172_17_7_1']}]},
 {running_nodes,['rabbit@DS-VM-Node172_17_7_1']},
 {cluster_name,<<"rabbit@DS-VM-Node172_17_7_1">>},
 {partitions,[]},
 {alarms,[{'rabbit@DS-VM-Node172_17_7_1',[]}]}]
[root@DT_Node-172_17_7_1 ~]#

a.2. Distribute the .erlang.cookie

[root@DT_Node-172_17_7_1 ~]# scp -P22992 /var/lib/rabbitmq/.erlang.cookie root@172.17.7.2:/var/lib/rabbitmq/
[root@DT_Node-172_17_7_1 ~]# scp -P22992 /var/lib/rabbitmq/.erlang.cookie root@172.17.7.3:/var/lib/rabbitmq/

a.3. Change the owner/group of the .erlang.cookie file on controller2/3

[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.2 'chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie' 
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.3 'chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie' 

a.4. Check the permissions of the .erlang.cookie file on all controller nodes; the default is already 400, so no change is needed

[root@DT_Node-172_17_7_1 ~]# ls -lha /var/lib/rabbitmq/
total 8.0K
drwxr-x---   3 rabbitmq rabbitmq   42 Sep  9 19:15 .
drwxr-xr-x. 31 root     root     4.0K Sep  9 19:14 ..
-r--------   1 rabbitmq rabbitmq   20 Sep  9 00:00 .erlang.cookie
drwxr-xr-x   4 rabbitmq rabbitmq  130 Sep  9 19:15 mnesia
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.2 'ls -lha /var/lib/rabbitmq/'  
total 8.0K
drwxr-x---   2 rabbitmq rabbitmq   28 Sep  9 19:17 .
drwxr-xr-x. 31 root     root     4.0K Sep  9 19:14 ..
-r--------   1 rabbitmq rabbitmq   20 Sep  9 19:17 .erlang.cookie
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.3 'ls -lha /var/lib/rabbitmq/' 
total 8.0K
drwxr-x---   2 rabbitmq rabbitmq   28 Sep  9 19:17 .
drwxr-xr-x. 31 root     root     4.0K Sep  9 19:13 ..
-r--------   1 rabbitmq rabbitmq   20 Sep  9 19:17 .erlang.cookie
[root@DT_Node-172_17_7_1 ~]#
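
The Erlang cookie must be byte-identical on all three controllers; a quick checksum comparison confirms the copies match:

for i in 1 2 3; do
    ssh -p22992 root@172.17.7.$i 'md5sum /var/lib/rabbitmq/.erlang.cookie'
done    # all three checksums must be identical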

a.5. Start the rabbitmq service on controller2/3

[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.2 'systemctl start rabbitmq-server'
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.3 'systemctl start rabbitmq-server'

a.6. Build the cluster: controller2/3 join as RAM nodes

[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.2 'rabbitmqctl stop_app'           
Stopping node rabbit@controller2 ...
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.2 'rabbitmqctl join_cluster --ram rabbit@controller1' 
Clustering node rabbit@controller2 with rabbit@controller1 ...
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.2 'rabbitmqctl start_app' 
Starting node rabbit@controller2 ...
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.3 'rabbitmqctl stop_app'     
Stopping node rabbit@controller3 ...
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.3 'rabbitmqctl join_cluster --ram rabbit@controller1' 
Clustering node rabbit@controller3 with rabbit@controller1 ...
[root@DT_Node-172_17_7_1 ~]# ssh -p22992 root@172.17.7.3 'rabbitmqctl start_app' 
Starting node rabbit@controller3 ...
[root@DT_Node-172_17_7_1 ~]# 

a.7. The cluster status can be verified from any node

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller1 ...
[{nodes,[{disc,[rabbit@controller1]},
         {ram,[rabbit@controller3,rabbit@controller2]}]},
 {running_nodes,[rabbit@controller3,rabbit@controller2,rabbit@controller1]},
 {cluster_name,<<"rabbit@controller1">>},
 {partitions,[]},
 {alarms,[{rabbit@controller3,[]},
          {rabbit@controller2,[]},
          {rabbit@controller1,[]}]}]
[root@DT_Node-172_17_7_1 ~]# 

3. RabbitMQ accounts

a. Create the account and set its password on any node (controller1 is used here)

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl add_user openstack MWY1NTA5NGYzYmM1MWQ2MTFk
Creating user "openstack" ...
[root@DT_Node-172_17_7_1 ~]# 

b. Set the new account's tags

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl set_user_tags openstack administrator
Setting tags for user "openstack" to [administrator] ...
[root@DT_Node-172_17_7_1 ~]# 

c. Set the new account's permissions

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl set_permissions -p "/" openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" ...
[root@DT_Node-172_17_7_1 ~]# 

d. List accounts

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl list_users
Listing users ...
openstack       [administrator]
guest   [administrator]
[root@DT_Node-172_17_7_1 ~]# 

4. Mirrored queue HA

# Enable high availability for mirrored queues

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all"}'
Setting policy "ha-all" for pattern "^" to "{\"ha-mode\":\"all\"}" with priority "0" ...
[root@DT_Node-172_17_7_1 ~]# 

# View the mirrored-queue policy

[root@DT_Node-172_17_7_1 ~]# rabbitmqctl list_policies 
Listing policies ...
/       ha-all  all     ^       {"ha-mode":"all"}       0
[root@DT_Node-172_17_7_1 ~]# 

5. Install the web management plugin

Install the web management plugin on all controller nodes; run from controller1:

[root@DT_Node-172_17_7_1 ~]# for i in 1 2 3;do ssh -p22992 root@172.17.7.$i 'rabbitmq-plugins enable rabbitmq_management';done 
The following plugins have been enabled:
  mochiweb
  webmachine
  rabbitmq_web_dispatch
  amqp_client
  rabbitmq_management_agent
  rabbitmq_management

Applying plugin configuration to rabbit@controller1... started 6 plugins.
The following plugins have been enabled:
  mochiweb
  webmachine
  rabbitmq_web_dispatch
  amqp_client
  rabbitmq_management_agent
  rabbitmq_management

Applying plugin configuration to rabbit@controller2... started 6 plugins.
The following plugins have been enabled:
  mochiweb
  webmachine
  rabbitmq_web_dispatch
  amqp_client
  rabbitmq_management_agent
  rabbitmq_management

Applying plugin configuration to rabbit@controller3... started 6 plugins.
[root@DT_Node-172_17_7_1 ~]# 

Access the management UI on any node, e.g. http://172.17.7.1:15672

IV. Memcached Cluster

Memcached is stateless and is deployed independently on each controller node; the OpenStack service modules simply reference the memcached instances on all controller nodes.
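
For illustration only (the exact option names appear in each service's configuration later in this series), the services end up pointing at all three instances with a setting along the lines of:

memcached_servers = controller1:11211,controller2:11211,controller3:11211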

1. Install memcached (on all controller nodes)

# Install memcached on all controller nodes

[root@DT_Node-172_17_7_1 ~]# yum install memcached python-memcached -y

2. Configure memcached (on all controller nodes)

# On every node running memcached, set the service listen address

[root@DT_Node-172_17_7_1 ~]# sed -i 's|127.0.0.1,::1|0.0.0.0|g' /etc/sysconfig/memcached

3. Enable at boot (on all controller nodes)

[root@DT_Node-172_17_7_1 ~]# systemctl enable memcached.service
[root@DT_Node-172_17_7_1 ~]# systemctl start memcached.service
[root@DT_Node-172_17_7_1 ~]# systemctl status memcached.service
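
A simple check that each instance is up and answering (nc is used here only for illustration and assumes nmap-ncat is installed):

ss -tnl | grep 11211                                          # memcached should be listening on 0.0.0.0:11211
printf 'stats\r\nquit\r\n' | nc 172.17.7.1 11211 | head -5    # first few lines of the stats output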