Firewall Rules
*filter
:INPUT DROP [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m tcp -m state --state NEW -m multiport --dports 22,22992 -m comment --comment "SSH_PORT" -j ACCEPT
-A INPUT -p tcp -m tcp -m state --state NEW -m multiport --dports 5900:6000 -m comment --comment "VNC_PORT" -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
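These rules are in iptables-save format. A minimal sketch for loading them, assuming the stock iptables-services package is used and the rules above are saved as /etc/sysconfig/iptables (adjust the SSH ports if yours differ):

# Assumption: the rules above were written to /etc/sysconfig/iptables
yum install -y iptables-services                           # provides the iptables.service unit
systemctl enable iptables && systemctl restart iptables    # loads /etc/sysconfig/iptables
iptables -L INPUT -n --line-numbers                        # verify the INPUT chain matches the rules above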
Kernel Parameters
fs.file-max = 65535
net.ipv4.tcp_max_tw_buckets = 60000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65000
net.nf_conntrack_max = 6553500
net.netfilter.nf_conntrack_max = 6553500
net.netfilter.nf_conntrack_tcp_timeout_close_wait = 60
net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 120
net.netfilter.nf_conntrack_tcp_timeout_time_wait = 120
net.netfilter.nf_conntrack_tcp_timeout_established = 3600
net.ipv4.conf.all.rp_filter = 2
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
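A minimal sketch for applying these parameters, assuming they were appended to /etc/sysctl.conf; the conntrack and bridge modules need to be loaded first, otherwise the net.netfilter.* and net.bridge.* keys fail to set:

# Assumption: the parameters above were written to /etc/sysctl.conf
modprobe nf_conntrack      # required before the net.netfilter.* keys can be applied
modprobe br_netfilter      # required before the net.bridge.bridge-nf-call-* keys can be applied
sysctl -p                  # apply /etc/sysctl.conf
sysctl net.netfilter.nf_conntrack_max net.bridge.bridge-nf-call-iptables   # spot-check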
Add the Ceph YUM Repository
curl -Lk http://git.ds.com/lookback/private-mirrors/raw/master/ceph.repo > /etc/yum.repos.d/ceph.repo
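Before installing anything, a quick check that the new repository is usable:

yum clean all && yum makecache
yum repolist enabled | grep -i ceph    # the newly added Ceph repo should appear here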
Install the Required Packages
yum install -y centos-release-openstack-queens && yum upgrade -y && yum install -y python-openstackclient
yum install -y python-openstackclient openstack-utils openstack-selinux && yum install -y openstack-nova-compute
yum install -y openstack-neutron-linuxbridge ebtables ipset
yum install -y openstack-cinder targetcli python-keystone
yum install -y python-rbd ceph-common
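A quick sanity check that the key packages landed (package names as used above):

rpm -q openstack-nova-compute openstack-neutron-linuxbridge ceph-common python-rbd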
Update the Hosts File on the Current Node
cat >>/etc/hosts <<EOF
10.10.10.250 mirrors.ds.com
10.0.1.179 git.ds.com
# Controller-Node
172.17.5.1 controller1
172.17.5.2 controller2
172.17.5.3 controller3
172.17.5.100 controller #HA VIP
# Compute-Node & Storage-Node
172.17.5.9 compute1 ceph-osd1
172.17.5.10 compute2 ceph-osd2
172.17.5.11 compute3 ceph-osd3
172.17.5.12 compute4 ceph-osd4
172.17.5.13 compute5
EOF
# Change "172.17.5.13 compute5" to the new node's address and hostname
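Name resolution can be verified right away:

getent hosts controller compute5 ceph-osd1    # should return the addresses added above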
Update the Hosts File on the Other Cluster Nodes
echo '172.17.5.13 compute5' >> /etc/hosts    # change "172.17.5.13 compute5" to the new node's address and hostname
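If passwordless root SSH to the existing nodes is available, the entry can be pushed out in one loop; a sketch, with the node names taken from the hosts file above:

# Assumption: root SSH access to the existing cluster nodes
for h in controller1 controller2 controller3 compute1 compute2 compute3 compute4; do
    ssh root@$h "grep -q '172.17.5.13 compute5' /etc/hosts || echo '172.17.5.13 compute5' >> /etc/hosts"
done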
Set the Hostname
# Change compute5 below to this node's hostname
for i in static pretty transient; do hostnamectl set-hostname compute5 --$i; done
# Update any reference to the old hostname in /etc/profile, then reload it
source /etc/profile
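The result can be confirmed with hostnamectl:

hostnamectl status    # Static, Pretty and Transient hostname should all show compute5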
Edit the Nova Configuration File
cp /etc/nova/nova.conf{,_original}    # back up the original configuration file
# Change my_ip=172.17.5.13 to this node's address.
# The here-doc delimiter is quoted so that $my_ip is written literally instead of being expanded by the shell.
cat >/etc/nova/nova.conf <<'EOF'
[DEFAULT]
my_ip=172.17.5.13
use_neutron=true
firewall_driver=nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
transport_url=rabbit://openstack:[email protected]:5672,openstack:[email protected]:5672,openstack:[email protected]:5672
allow_resize_to_same_host=True
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
[api]
auth_strategy=keystone
[api_database]
auth_strategy = keystone
[api_database]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = ZTQ0NTdjOTI1YzY1Zjg2ZTE2
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers=http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = MzkwOWI1Nzc3Mzg5NTg2MTk4
[libvirt]
images_rbd_pool=nova-ephemeral
images_rbd_ceph_conf = /etc/ceph/ceph.conf
images_type=rbd
rbd_secret_uuid=2909eaae-657c-4e11-bba8-e4e5504f04a2
rbd_user=nova-ephemeral
virt_type=kvm
inject_password=True
inject_key=True
inject_partition=-1
cpu_mode=host-model
vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url=http://controller:9696
auth_type=password
auth_url=http://controller:35357
region_name=RegionDT01
project_name=service
project_domain_name=default
username=neutron
user_domain_name=default
password=MzkwOWI1Nzc3Mzg5NTg2MTk4
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name=RegionDT01
auth_type=password
auth_url=http://controller:35357/v3
project_name=service
project_domain_name=Default
username=placement
user_domain_name=Default
password=ZTQ0NTdjOTI1YzY1Zjg2ZTE2
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
#enabled=true
##vncserver_listen=0.0.0.0
##vncserver_proxyclient_address=$my_ip
##novncproxy_base_url=http://172.17.5.100:6080/vnc_auto.html
vnc_enable = true
novnc_enable = true
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=$my_ip
novncproxy_base_url=http://172.17.5.100:6080/vnc_auto.html
#novncproxy_base_url=http://172.17.5.2:6080/vnc_auto.html
novncproxy_port=6080
[workarounds]
[wsgi]
[xenserver]
[xvp]
EOF
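A few values are worth spot-checking once the file is written; openstack-config is provided by the openstack-utils package installed earlier:

openstack-config --get /etc/nova/nova.conf DEFAULT my_ip
openstack-config --get /etc/nova/nova.conf libvirt rbd_secret_uuid
openstack-config --get /etc/nova/nova.conf vnc vncserver_proxyclient_address   # should print the literal $my_ip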
Edit the Neutron Configuration File
cp /etc/neutron/neutron.conf{,_original}    # back up the original configuration file
# Change bind_host = 172.17.5.13 to this node's address.
# The here-doc delimiter is quoted so that $state_path is written literally instead of being expanded by the shell.
cat >/etc/neutron/neutron.conf <<'EOF'
[DEFAULT]
state_path = /var/lib/neutron
bind_host = 172.17.5.13
auth_strategy = keystone
transport_url=rabbit://openstack:[email protected]:5672,openstack:[email protected]:5672,openstack:[email protected]:5672
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = MzkwOWI1Nzc3Mzg5NTg2MTk4
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
EOF
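The same kind of spot check works here:

openstack-config --get /etc/neutron/neutron.conf DEFAULT bind_host
openstack-config --get /etc/neutron/neutron.conf DEFAULT transport_url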
Edit the linuxbridge_agent Configuration File
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,_original}    # back up the original configuration file
cat > /etc/neutron/plugins/ml2/linuxbridge_agent.ini <<EOF
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = vlan:eth1
[network_log]
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = false
local_ip = 10.0.0.41
l2_population = true
EOF
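The physical_interface_mappings value above assumes the provider VLANs arrive on eth1; confirm the interface name on this host before restarting the agent (local_ip only matters if enable_vxlan is later turned on):

ip -o link show eth1    # must exist; otherwise adjust physical_interface_mappings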
Copy the Ceph Configuration from Another Compute Node
scp -r [email protected]:/etc/ceph /etc    # copy from an existing compute node
chgrp -R nova /etc/ceph
chmod -R 0640 /etc/ceph

Integrate Ceph

virsh secret-define --file /etc/ceph/ceph-nova.xml
virsh secret-set-value --secret $(awk -F'>|<' 'NR==2{print $3}' /etc/ceph/ceph-nova.xml) --base64 $(cat /etc/ceph/client.nova-ephemeral.key)
virsh secret-define --file /etc/ceph/ceph.xml
virsh secret-set-value --secret $(awk -F'>|<' 'NR==2{print $3}' /etc/ceph/ceph.xml) --base64 $(cat /etc/ceph/client.cinder-volumes.key)
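Both secrets should now be known to libvirt:

virsh secret-list    # should list the UUIDs defined in ceph-nova.xml and ceph.xml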
Enable Services at Boot
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
Start the Services
systemctl restart neutron-linuxbridge-agent.service openstack-nova-compute.service libvirtd.service
systemctl status neutron-linuxbridge-agent.service openstack-nova-compute.service libvirtd.service
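If any of the services fails to stay up, the logs usually point at the cause, for example:

journalctl -u openstack-nova-compute.service -n 50 --no-pager
tail -n 50 /var/log/nova/nova-compute.log /var/log/neutron/linuxbridge-agent.log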
Testing
[openstack-admin]-[[email protected]_Node-172_17_5_1 ~]# openstack compute service list --service nova-compute
+-----+--------------+----------+------+---------+-------+----------------------------+
| ID  | Binary       | Host     | Zone | Status  | State | Updated At                 |
+-----+--------------+----------+------+---------+-------+----------------------------+
| 87  | nova-compute | compute4 | nova | enabled | up    | 2018-12-12T05:07:16.000000 |
| 90  | nova-compute | compute3 | nova | enabled | up    | 2018-12-12T05:07:16.000000 |
| 96  | nova-compute | compute1 | nova | enabled | up    | 2018-12-12T05:07:23.000000 |
| 99  | nova-compute | compute2 | nova | enabled | up    | 2018-12-12T05:07:17.000000 |
| 130 | nova-compute | compute5 | nova | enabled | up    | 2018-12-12T05:07:17.000000 |
+-----+--------------+----------+------+---------+-------+----------------------------+
[openstack-admin]-[[email protected]_Node-172_17_5_1 ~]# openstack availability zone list --compute
+-----------+-------------+
| Zone Name | Zone Status |
+-----------+-------------+
| internal  | available   |
| nova      | available   |
+-----------+-------------+
[openstack-admin]-[[email protected]_Node-172_17_5_1 ~]# nova boot --availability-zone nova:compute5 --flavor L-2c2g40g --image CentOS-7.5.1804-20181016 --nic net-id=9e66ff7b-5817-43ba-b1e2-bfa69569ab98,v4-fixed-ip=172.30.5.111 'benyoo-test5-20181212'
+--------------------------------------+-----------------------------------------------------------------+
| Property | Value |
+--------------------------------------+-----------------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-SRV-ATTR:host | - |
| OS-EXT-SRV-ATTR:hostname | benyoo-test5-20181212 |
| OS-EXT-SRV-ATTR:hypervisor_hostname | - |
| OS-EXT-SRV-ATTR:instance_name | |
| OS-EXT-SRV-ATTR:kernel_id | |
| OS-EXT-SRV-ATTR:launch_index | 0 |
| OS-EXT-SRV-ATTR:ramdisk_id | |
| OS-EXT-SRV-ATTR:reservation_id | r-zy2wg10b |
| OS-EXT-SRV-ATTR:root_device_name | - |
| OS-EXT-SRV-ATTR:user_data | - |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | zg9LNHeG6ofH |
| config_drive | |
| created | 2018-12-12T05:08:06Z |
| description | - |
| flavor:disk | 40 |
| flavor:ephemeral | 0 |
| flavor:extra_specs | {} |
| flavor:original_name | L-2c2g40g |
| flavor:ram | 2048 |
| flavor:swap | 0 |
| flavor:vcpus | 2 |
| hostId | |
| host_status | |
| id | b5e6c79b-6b33-4355-a26c-bb6e38caad84 |
| image | CentOS-7.5.1804-20181016 (1e6dc5c7-2118-4d63-81f2-60d55d640cfc) |
| key_name | - |
| locked | False |
| metadata | {} |
| name | benyoo-test5-20181212 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | default |
| status | BUILD |
| tags | [] |
| tenant_id | e5272be1f6674fbcae5489a099f98e24 |
| updated | 2018-12-12T05:08:06Z |
| user_id | 2b83a7415f1342738607ce5307ea9ff1 |
+--------------------------------------+-----------------------------------------------------------------+
[openstack-admin]-[[email protected]_Node-172_17_5_1 ~]# ping 172.30.5.111 -c2
PING 172.30.5.111 (172.30.5.111) 56(84) bytes of data.
64 bytes from 172.30.5.111: icmp_seq=1 ttl=63 time=1.15 ms
64 bytes from 172.30.5.111: icmp_seq=2 ttl=63 time=1.10 ms

--- 172.30.5.111 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 1.108/1.130/1.153/0.040 ms
[openstack-admin]-[[email protected]_Node-172_17_5_1 ~]# ssh [email protected]
The authenticity of host '172.30.5.111 (172.30.5.111)' can't be established.
ECDSA key fingerprint is SHA256:PFVr4Yqq5cKD6fHR2HqLbWrVfD1VaI1IufTMPyr8LxM.
ECDSA key fingerprint is MD5:f0:4b:b6:dd:59:6b:26:0d:53:c9:ff:e8:f2:52:be:40.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '172.30.5.111' (ECDSA) to the list of known hosts.
[email protected]'s password:
Permission denied, please try again.
[email protected]'s password:
(ASCII-art login banner omitted)
[[email protected]_Node-172_30_5_111 ~]# uptime
 13:09:50 up 1 min,  1 user,  load average: 0.29, 0.12, 0.04
[[email protected]_Node-172_30_5_111 ~]#
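Once the new node is confirmed working, the test instance can be deleted from the controller, using the same admin environment as above:

openstack server delete benyoo-test5-20181212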